in_source_id (stringlengths 13–58) | issue (stringlengths 3–241k) | before_files (listlengths 0–3) | after_files (listlengths 0–3) | pr_diff (stringlengths 109–107M, nullable ⌀)
---|---|---|---|---|
gratipay__gratipay.com-1314 | reset.css doesn't load sometimes
@clone1018 saw this when we first started caching static assets. It's why I turned off static caching initially. Now static caching is back with #1245 and indeed we're seeing this again. :(

| [
{
"content": "\"\"\"\nHandles caching of static resources.\n\"\"\"\nimport os\nfrom calendar import timegm\nfrom email.utils import parsedate\nfrom wsgiref.handlers import format_date_time\n\nfrom aspen import Response\n\n\ndef version_is_available(request):\n \"\"\"Return a boolean, whether we have the version they asked for.\n \"\"\"\n path = request.line.uri.path\n version = request.website.version\n return path['version'] == version if 'version' in path else True\n\n\ndef version_is_dash(request):\n \"\"\"Return a boolean, whether the version they asked for is -.\n \"\"\"\n return request.line.uri.path.get('version') == '-'\n\n\ndef get_last_modified(fs_path):\n \"\"\"Get the last modified time, as int, of the file pointed to by fs_path.\n \"\"\"\n return int(os.path.getctime(fs_path))\n\n\ndef inbound(request):\n \"\"\"Try to serve a 304 for resources under assets/.\n \"\"\"\n uri = request.line.uri\n\n if not uri.startswith('/assets/'):\n\n # Only apply to the assets/ directory.\n\n return request\n\n if version_is_dash(request):\n\n # Special-case a version of '-' to never 304/404 here.\n\n return request\n\n if not version_is_available(request):\n\n # Don't serve one version of a file as if it were another.\n\n raise Response(404)\n\n ims = request.headers.get('If-Modified-Since')\n if not ims:\n\n # This client doesn't care about when the file was modified.\n\n return request\n\n if request.fs.endswith('.spt'):\n\n # This is a requests for a dynamic resource. Perhaps in the future\n # we'll delegate to such resources to compute a sensible Last-Modified\n # or E-Tag, but for now we punt. This is okay, because we expect to\n # put our dynamic assets behind a CDN in production.\n\n return request\n\n\n try:\n ims = timegm(parsedate(ims))\n except:\n\n # Malformed If-Modified-Since header. Proceed with the request.\n\n return request\n\n last_modified = get_last_modified(request.fs)\n if ims < last_modified:\n\n # The file has been modified since. Serve the whole thing.\n\n return request\n\n\n # Huzzah!\n # =======\n # We can serve a 304! :D\n\n response = Response(304)\n response.headers['Last-Modified'] = format_date_time(last_modified)\n response.headers['Cache-Control'] = 'no-cache'\n raise response\n\n\ndef outbound(response):\n \"\"\"Set caching headers for resources under assets/.\n \"\"\"\n request = response.request\n website = request.website\n uri = request.line.uri\n\n version = website.version\n response.headers['X-Gittip-Version'] = version\n\n if not uri.startswith('/assets/'):\n return response\n\n response.headers.cookie.clear()\n\n if response.code == 304:\n return response\n\n if website.cache_static:\n\n # https://developers.google.com/speed/docs/best-practices/caching\n response.headers['Cache-Control'] = 'public'\n response.headers['Vary'] = 'accept-encoding'\n\n if 'version' in uri.path:\n # This specific asset is versioned, so it's fine to cache it.\n response.headers['Expires'] = 'Sun, 17 Jan 2038 19:14:07 GMT'\n else:\n # Asset is not versioned. Don't cache it, but set Last-Modified.\n last_modified = get_last_modified(request.fs)\n response.headers['Last-Modified'] = format_date_time(last_modified)\n",
"path": "gittip/cache_static.py"
}
] | [
{
"content": "\"\"\"\nHandles caching of static resources.\n\"\"\"\nimport os\nfrom calendar import timegm\nfrom email.utils import parsedate\nfrom wsgiref.handlers import format_date_time\n\nfrom aspen import Response\n\n\ndef version_is_available(request):\n \"\"\"Return a boolean, whether we have the version they asked for.\n \"\"\"\n path = request.line.uri.path\n version = request.website.version\n return path['version'] == version if 'version' in path else True\n\n\ndef version_is_dash(request):\n \"\"\"Return a boolean, whether the version they asked for is -.\n \"\"\"\n return request.line.uri.path.get('version') == '-'\n\n\ndef get_last_modified(fs_path):\n \"\"\"Get the last modified time, as int, of the file pointed to by fs_path.\n \"\"\"\n return int(os.path.getctime(fs_path))\n\n\ndef inbound(request):\n \"\"\"Try to serve a 304 for resources under assets/.\n \"\"\"\n uri = request.line.uri\n\n if not uri.startswith('/assets/'):\n\n # Only apply to the assets/ directory.\n\n return request\n\n if version_is_dash(request):\n\n # Special-case a version of '-' to never 304/404 here.\n\n return request\n\n if not version_is_available(request):\n\n # Don't serve one version of a file as if it were another.\n\n raise Response(404)\n\n ims = request.headers.get('If-Modified-Since')\n if not ims:\n\n # This client doesn't care about when the file was modified.\n\n return request\n\n if request.fs.endswith('.spt'):\n\n # This is a requests for a dynamic resource. Perhaps in the future\n # we'll delegate to such resources to compute a sensible Last-Modified\n # or E-Tag, but for now we punt. This is okay, because we expect to\n # put our dynamic assets behind a CDN in production.\n\n return request\n\n\n try:\n ims = timegm(parsedate(ims))\n except:\n\n # Malformed If-Modified-Since header. Proceed with the request.\n\n return request\n\n last_modified = get_last_modified(request.fs)\n if ims < last_modified:\n\n # The file has been modified since. Serve the whole thing.\n\n return request\n\n\n # Huzzah!\n # =======\n # We can serve a 304! :D\n\n response = Response(304)\n response.headers['Last-Modified'] = format_date_time(last_modified)\n response.headers['Cache-Control'] = 'no-cache'\n raise response\n\n\ndef outbound(response):\n \"\"\"Set caching headers for resources under assets/.\n \"\"\"\n request = response.request\n website = request.website\n uri = request.line.uri\n\n version = website.version\n response.headers['X-Gittip-Version'] = version\n\n if not uri.startswith('/assets/'):\n return response\n\n response.headers.cookie.clear()\n\n if response.code == 304:\n\n # https://github.com/gittip/www.gittip.com/issues/1308\n del response.headers['Content-Type']\n\n return response\n\n if website.cache_static:\n\n # https://developers.google.com/speed/docs/best-practices/caching\n response.headers['Cache-Control'] = 'public'\n response.headers['Vary'] = 'accept-encoding'\n\n if 'version' in uri.path:\n # This specific asset is versioned, so it's fine to cache it.\n response.headers['Expires'] = 'Sun, 17 Jan 2038 19:14:07 GMT'\n else:\n # Asset is not versioned. Don't cache it, but set Last-Modified.\n last_modified = get_last_modified(request.fs)\n response.headers['Last-Modified'] = format_date_time(last_modified)\n",
"path": "gittip/cache_static.py"
}
] | diff --git a/gittip/cache_static.py b/gittip/cache_static.py
index 2a467bff33..ceeec951ba 100644
--- a/gittip/cache_static.py
+++ b/gittip/cache_static.py
@@ -111,6 +111,10 @@ def outbound(response):
response.headers.cookie.clear()
if response.code == 304:
+
+ # https://github.com/gittip/www.gittip.com/issues/1308
+ del response.headers['Content-Type']
+
return response
if website.cache_static:
|
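The fix recorded in this row (see #1308 referenced in the diff) strips the `Content-Type` header from 304 responses: a 304 carries no body, so a leftover `Content-Type` from the asset pipeline could make clients mishandle the cached `reset.css`. A minimal sketch of the patched outbound hook, assuming an aspen-style `Response` whose `headers` behaves like a dict:

```
# Minimal sketch of the patched outbound hook (aspen-style Response assumed).
def outbound(response):
    if response.code == 304:
        # A 304 carries no body, so it should not advertise a Content-Type;
        # https://github.com/gittip/www.gittip.com/issues/1308
        if 'Content-Type' in response.headers:
            del response.headers['Content-Type']
    return response
```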
conan-io__conan-8965 | [bug] Meson cross-file is not looked up in the conan install-folder
### Environment Details (include every applicable attribute)
* Operating System+version: Linux Ubuntu 20.04.2 LTS
* Compiler+version: x86_64-w64-mingw32 9.3
* Conan version: 1.36.0
* Python version: 3.8.5
### Steps to reproduce (Include if Applicable)
- create a profile for cross compilation Linux to Windows (as from the documentation)
- create a cross-compiled meson project (generator pkg_config and generate MesonToolchain)
- `conan install . -if install` (conan_meson_cross.ini is generated inside the install directory)
- `conan build . -if install` (conan_meson_cross.ini is not found in the current directory)
### Logs (Executed commands with output) (Include/Attach if Applicable)
```
vscode ➜ /workspaces/tennisAnalysis (main ✗) $ conan install . -if install
Configuration:
[settings]
arch=x86_64
build_type=Release
compiler=gcc
compiler.libcxx=libstdc++11
compiler.version=9.3
os=Windows
os_build=Linux
[options]
[build_requires]
[env]
AR=x86_64-w64-mingw32-ar
AS=x86_64-w64-mingw32-as
CC=x86_64-w64-mingw32-gcc-posix
CHOST=x86_64-w64-mingw32
CONAN_CMAKE_FIND_ROOT_PATH=/usr/bin/x86_64-w64-mingw32 # Adjust this path # Optional, for CMake to find things in that folder
CONAN_CMAKE_SYSROOT=/usr/bin/x86_64-w64-mingw32 # Adjust this path # Optional, if we want to define sysroot
CXX=x86_64-w64-mingw32-g++-posix
PKG_CONFIG=pkg-config
RANLIB=x86_64-w64-mingw32-ranlib
RC=x86_64-w64-mingw32-windres
STRIP=x86_64-w64-mingw32-strip
WARN: libtiff/4.2.0: requirement libwebp/1.1.0 overridden by opencv/4.5.2 to libwebp/1.2.0
conanfile.py: Installing package
Requirements
eigen/3.3.9 from 'conan-center' - Cache
jasper/2.0.32 from 'conan-center' - Cache
jbig/20160605 from 'conan-center' - Cache
libdeflate/1.7 from 'conan-center' - Cache
libjpeg/9d from 'conan-center' - Cache
libpng/1.6.37 from 'conan-center' - Cache
libtiff/4.2.0 from 'conan-center' - Cache
libwebp/1.2.0 from 'conan-center' - Cache
opencv/4.5.2 from 'conan-center' - Cache
quirc/1.1 from 'conan-center' - Cache
xz_utils/5.2.5 from 'conan-center' - Cache
zlib/1.2.11 from 'conan-center' - Cache
zstd/1.4.8 from 'conan-center' - Cache
Packages
eigen/3.3.9:5ab84d6acfe1f23c4fae0ab88f26e3a396351ac9 - Cache
jasper/2.0.32:0b2b79209cb5a733c6f60939a011a2d5b9baba3e - Cache
jbig/20160605:eb359adcb4224cf32a880f4840496998b718e67a - Cache
libdeflate/1.7:344886eda55829e935447d0708e3b993938b32c8 - Cache
libjpeg/9d:344886eda55829e935447d0708e3b993938b32c8 - Cache
libpng/1.6.37:0ff33ddf098055bd06ad25e84c8ac73a7d386ae6 - Cache
libtiff/4.2.0:9a66f421b7e2c46cae4d0544a209f0a41fce4717 - Cache
libwebp/1.2.0:743b5bdc8f8a9eb56cece0880367af1603426c77 - Cache
opencv/4.5.2:3c85fd5b9706d74ca80c0013b88789f0a882a76e - Cache
quirc/1.1:923b659fe22255fc3db85bbda05de841448c924b - Cache
xz_utils/5.2.5:344886eda55829e935447d0708e3b993938b32c8 - Cache
zlib/1.2.11:344886eda55829e935447d0708e3b993938b32c8 - Cache
zstd/1.4.8:344886eda55829e935447d0708e3b993938b32c8 - Cache
Cross-build from 'Linux:x86_64' to 'Windows:x86_64'
Installing (downloading, building) binaries...
eigen/3.3.9: Already installed!
jbig/20160605: Already installed!
jbig/20160605: Appending PATH environment variable: /home/vscode/.conan/data/jbig/20160605/_/_/package/eb359adcb4224cf32a880f4840496998b718e67a/bin
libdeflate/1.7: Already installed!
libjpeg/9d: Already installed!
libwebp/1.2.0: Already installed!
quirc/1.1: Already installed!
xz_utils/5.2.5: Already installed!
zlib/1.2.11: Already installed!
zstd/1.4.8: Already installed!
jasper/2.0.32: Already installed!
libpng/1.6.37: Already installed!
libtiff/4.2.0: Already installed!
opencv/4.5.2: Already installed!
conanfile.py: Generator pkg_config created opencv_core.pc
conanfile.py: Generator pkg_config created opencv_imgproc.pc
conanfile.py: Generator pkg_config created opencv_flann.pc
conanfile.py: Generator pkg_config created opencv_features2d.pc
conanfile.py: Generator pkg_config created opencv_calib3d.pc
conanfile.py: Generator pkg_config created opencv_video.pc
conanfile.py: Generator pkg_config created opencv_video_alias.pc
conanfile.py: Generator pkg_config created opencv_stitching.pc
conanfile.py: Generator pkg_config created opencv_stitching_alias.pc
conanfile.py: Generator pkg_config created opencv_objdetect.pc
conanfile.py: Generator pkg_config created opencv_objdetect_alias.pc
conanfile.py: Generator pkg_config created opencv_imgcodecs.pc
conanfile.py: Generator pkg_config created opencv_videoio.pc
conanfile.py: Generator pkg_config created opencv_highgui.pc
conanfile.py: Generator pkg_config created opencv_highgui_alias.pc
conanfile.py: Generator pkg_config created opencv_calib3d_alias.pc
conanfile.py: Generator pkg_config created opencv_videoio_alias.pc
conanfile.py: Generator pkg_config created opencv_imgcodecs_alias.pc
conanfile.py: Generator pkg_config created opencv_features2d_alias.pc
conanfile.py: Generator pkg_config created opencv_photo.pc
conanfile.py: Generator pkg_config created opencv_photo_alias.pc
conanfile.py: Generator pkg_config created opencv_ml.pc
conanfile.py: Generator pkg_config created opencv_ml_alias.pc
conanfile.py: Generator pkg_config created opencv_imgproc_alias.pc
conanfile.py: Generator pkg_config created opencv_flann_alias.pc
conanfile.py: Generator pkg_config created opencv_core_alias.pc
conanfile.py: Generator pkg_config created opencv.pc
conanfile.py: Generator pkg_config created jasper.pc
conanfile.py: Generator pkg_config created libpng.pc
conanfile.py: Generator pkg_config created libtiff-4.pc
conanfile.py: Generator pkg_config created eigen3.pc
conanfile.py: Generator pkg_config created quirc.pc
conanfile.py: Generator pkg_config created zlib.pc
conanfile.py: Generator pkg_config created libjpeg.pc
conanfile.py: Generator pkg_config created libdeflate.pc
conanfile.py: Generator pkg_config created liblzma.pc
conanfile.py: Generator pkg_config created jbig.pc
conanfile.py: Generator pkg_config created libzstd.pc
conanfile.py: Generator pkg_config created zstd.pc
conanfile.py: Generator pkg_config created libwebp.pc
conanfile.py: Generator pkg_config created libwebpmux.pc
conanfile.py: Generator pkg_config created libwebpdemux.pc
conanfile.py: Generator pkg_config created libwebpdecoder.pc
conanfile.py: Generator txt created conanbuildinfo.txt
conanfile.py: Calling generate()
conanfile.py: Generated conaninfo.txt
conanfile.py: Generated graphinfo
vscode ➜ /workspaces/tennisAnalysis (main ✗) $ conan build . -if install
Using lockfile: '/workspaces/tennisAnalysis/install/conan.lock'
Using cached profile from lockfile
conanfile.py: Calling build()
Could not find any valid candidate for cross files: conan_meson_cross.ini
ERROR: Cannot find specified cross file: conan_meson_cross.ini
ERROR: conanfile.py: Error in build() method, line 42
meson.configure(source_folder="src")
ConanException: Error 1 while executing meson setup --cross-file "conan_meson_cross.ini" "/workspaces/tennisAnalysis/build" "/workspaces/tennisAnalysis/src" -Dprefix="/workspaces/tennisAnalysis/package"
```
| [
{
"content": "import os\n\nfrom conan.tools.build import build_jobs\nfrom conan.tools.meson import MesonToolchain\n\n\nclass Meson(object):\n def __init__(self, conanfile):\n self._conanfile = conanfile\n\n def configure(self, reconfigure=False):\n source_folder = self._conanfile.source_folder\n build_folder = self._conanfile.build_folder\n cmd = \"meson setup\"\n generators_folder = self._conanfile.generators_folder\n cross = os.path.join(generators_folder, MesonToolchain.cross_filename)\n native = os.path.join(generators_folder, MesonToolchain.native_filename)\n if os.path.exists(cross):\n cmd += ' --cross-file \"{}\"'.format(cross)\n else:\n cmd += ' --native-file \"{}\"'.format(native)\n cmd += ' \"{}\" \"{}\"'.format(build_folder, source_folder)\n if self._conanfile.package_folder:\n cmd += ' -Dprefix=\"{}\"'.format(self._conanfile.package_folder)\n if reconfigure:\n cmd += ' --reconfigure'\n self._conanfile.output.info(\"Meson configure cmd: {}\".format(cmd))\n self._conanfile.run(cmd)\n\n def build(self, target=None):\n meson_build_folder = self._conanfile.build_folder\n cmd = 'meson compile -C \"{}\"'.format(meson_build_folder)\n njobs = build_jobs(self._conanfile)\n if njobs:\n cmd += \" -j{}\".format(njobs)\n if target:\n cmd += \" {}\".format(target)\n self._conanfile.output.info(\"Meson build cmd: {}\".format(cmd))\n self._conanfile.run(cmd)\n\n def install(self):\n self.configure(reconfigure=True) # To re-do the destination package-folder\n meson_build_folder = self._conanfile.build_folder\n cmd = 'meson install -C \"{}\"'.format(meson_build_folder)\n self._conanfile.run(cmd)\n\n def test(self):\n meson_build_folder = self._conanfile.build_folder\n cmd = 'meson test -v -C \"{}\"'.format(meson_build_folder)\n # TODO: Do we need vcvars for test?\n # TODO: This should use conanrunenv, but what if meson itself is a build-require?\n self._conanfile.run(cmd)\n",
"path": "conan/tools/meson/meson.py"
}
] | [
{
"content": "import os\n\nfrom conan.tools.build import build_jobs\nfrom conan.tools.meson import MesonToolchain\n\nclass Meson(object):\n def __init__(self, conanfile):\n self._conanfile = conanfile\n\n def configure(self, reconfigure=False):\n source_folder = self._conanfile.source_folder\n build_folder = self._conanfile.build_folder\n cmd = \"meson setup\"\n generators_folder = self._conanfile.generators_folder\n cross = os.path.join(generators_folder, MesonToolchain.cross_filename)\n native = os.path.join(generators_folder, MesonToolchain.native_filename)\n if os.path.exists(cross):\n cmd += ' --cross-file \"{}\"'.format(cross)\n else:\n cmd += ' --native-file \"{}\"'.format(native)\n cmd += ' \"{}\" \"{}\"'.format(build_folder, source_folder)\n if self._conanfile.package_folder:\n cmd += ' -Dprefix=\"{}\"'.format(self._conanfile.package_folder)\n if reconfigure:\n cmd += ' --reconfigure'\n self._conanfile.output.info(\"Meson configure cmd: {}\".format(cmd))\n self._conanfile.run(cmd)\n\n def build(self, target=None):\n meson_build_folder = self._conanfile.build_folder\n cmd = 'meson compile -C \"{}\"'.format(meson_build_folder)\n njobs = build_jobs(self._conanfile)\n if njobs:\n cmd += \" -j{}\".format(njobs)\n if target:\n cmd += \" {}\".format(target)\n self._conanfile.output.info(\"Meson build cmd: {}\".format(cmd))\n self._conanfile.run(cmd)\n\n def install(self):\n self.configure(reconfigure=True) # To re-do the destination package-folder\n meson_build_folder = self._conanfile.build_folder\n cmd = 'meson install -C \"{}\"'.format(meson_build_folder)\n self._conanfile.run(cmd)\n\n def test(self):\n meson_build_folder = self._conanfile.build_folder\n cmd = 'meson test -v -C \"{}\"'.format(meson_build_folder)\n # TODO: Do we need vcvars for test?\n # TODO: This should use conanrunenv, but what if meson itself is a build-require?\n self._conanfile.run(cmd)\n",
"path": "conan/tools/meson/meson.py"
}
] | diff --git a/conan/tools/meson/meson.py b/conan/tools/meson/meson.py
index 1285bab4769..37287caa3e3 100644
--- a/conan/tools/meson/meson.py
+++ b/conan/tools/meson/meson.py
@@ -3,7 +3,6 @@
from conan.tools.build import build_jobs
from conan.tools.meson import MesonToolchain
-
class Meson(object):
def __init__(self, conanfile):
self._conanfile = conanfile
diff --git a/conans/test/functional/toolchains/meson/test_meson.py b/conans/test/functional/toolchains/meson/test_meson.py
index e95ab5492a6..14189857d80 100644
--- a/conans/test/functional/toolchains/meson/test_meson.py
+++ b/conans/test/functional/toolchains/meson/test_meson.py
@@ -21,6 +21,7 @@ def config_options(self):
del self.options.fPIC
def layout(self):
+ self.folders.generators = 'build/gen_folder'
self.folders.build = "build"
def generate(self):
@@ -71,7 +72,7 @@ def test_build(self):
self.t.run("install . %s" % self._settings_str)
- content = self.t.load("conan_meson_native.ini")
+ content = self.t.load(os.path.join("build", "gen_folder", "conan_meson_native.ini"))
self.assertIn("[project options]", content)
self.assertIn("STRING_DEFINITION = 'Text'", content)
|
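The root cause in this row is path resolution: `Meson.configure()` passed the bare filenames `conan_meson_cross.ini`/`conan_meson_native.ini` to `meson setup`, so Meson looked them up in the current working directory instead of the `--install-folder`. The patch joins them with `conanfile.generators_folder` first. A minimal sketch of the fixed lookup (the helper name is illustrative, not conan API):

```
import os

# Sketch: resolve the MesonToolchain files against the generators folder
# (the conan install folder), not the directory `conan build` runs from.
def meson_config_flag(generators_folder):
    cross = os.path.join(generators_folder, "conan_meson_cross.ini")
    native = os.path.join(generators_folder, "conan_meson_native.ini")
    if os.path.exists(cross):
        return '--cross-file "{}"'.format(cross)
    return '--native-file "{}"'.format(native)
```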
bokeh__bokeh-1434 | Tools get lost on Grid Plots
JS logic error prevents all tools from showing up in the toolbar. (cf. comment in #1342)
| [
{
"content": "import numpy as np\n\nfrom bokeh.plotting import *\n\nN = 50\n\nx = np.linspace(0, 4*np.pi, N)\ny = np.sin(x)\n\nTOOLS = \"pan,wheel_zoom,box_zoom,reset,save\"\n\nl = figure(title=\"line\", tools=TOOLS)\nl.line(x,y, line_width=3, color=\"gold\")\n\naw = figure(title=\"annular wedge\", tools=TOOLS)\naw.annular_wedge(x, y, 10, 20, 0.6, 4.1, color=\"navy\", alpha=0.5,\n inner_radius_units=\"screen\", outer_radius_units=\"screen\")\n\nbez = figure(title=\"bezier\", tools=TOOLS)\nbez.bezier(x, y, x+0.4, y, x+0.1, y+0.2, x-0.1, y-0.2,\n line_width=2, color=\"olive\")\n\nq = figure(title=\"quad\", tools=TOOLS)\nq.quad(x, x-0.2, y, y-0.2, color=\"tomato\", alpha=0.4)\n\np = gridplot([[l,aw],[bez,q]])\n\noutput_file(\"grid.html\", title=\"grid.py example\")\nshow(p)\n",
"path": "examples/plotting/file/grid.py"
}
] | [
{
"content": "import numpy as np\n\nfrom bokeh.plotting import *\n\nN = 50\n\nx = np.linspace(0, 4*np.pi, N)\ny = np.sin(x)\n\nTOOLS = \"pan,wheel_zoom,box_zoom,reset,save,crosshair\"\n\nl = figure(title=\"line\", tools=TOOLS)\nl.line(x,y, line_width=3, color=\"gold\")\n\naw = figure(title=\"annular wedge\", tools=TOOLS)\naw.annular_wedge(x, y, 10, 20, 0.6, 4.1, color=\"navy\", alpha=0.5,\n inner_radius_units=\"screen\", outer_radius_units=\"screen\")\n\nbez = figure(title=\"bezier\", tools=TOOLS)\nbez.bezier(x, y, x+0.4, y, x+0.1, y+0.2, x-0.1, y-0.2,\n line_width=2, color=\"olive\")\n\nq = figure(title=\"quad\", tools=TOOLS)\nq.quad(x, x-0.2, y, y-0.2, color=\"tomato\", alpha=0.4)\n\np = gridplot([[l,aw],[bez,q]])\n\noutput_file(\"grid.html\", title=\"grid.py example\")\nshow(p)\n",
"path": "examples/plotting/file/grid.py"
}
] | diff --git a/bokehjs/src/coffee/common/grid_plot.coffee b/bokehjs/src/coffee/common/grid_plot.coffee
index 35c63e52e17..176e016cea6 100644
--- a/bokehjs/src/coffee/common/grid_plot.coffee
+++ b/bokehjs/src/coffee/common/grid_plot.coffee
@@ -15,6 +15,25 @@ define [
logger = Logging.logger
class _ToolProxy extends Backbone.Model
+
+ initialize: (options) ->
+ super(options)
+ # OK this is pretty lame but should work until we make a new
+ # better grid plot. This just mimics all the events that
+ # any of the tool types might expect to get.
+ @listenTo(@, 'do', @do)
+ return null
+
+ do: () ->
+ for tool in @attributes.tools
+ tool.trigger('do')
+ return null
+
+ active: () ->
+ for tool in @attributes.tools
+ tool.set('active', @get('active'))
+ return null
+
attrs_and_props: () ->
return @attributes.tools[0].attrs_and_props()
@@ -25,6 +44,7 @@ define [
super(attr, value)
for tool in @attributes.tools
tool.set(attr, value)
+ return null
class GridToolManager extends ToolManager.Model
@@ -50,7 +70,7 @@ define [
inspectors[tool.type] = []
inspectors[tool.type].push(tool)
- for tool in @get('actions')
+ for tool in tm.get('actions')
if tool.type not of actions
actions[tool.type] = []
actions[tool.type].push(tool)
@@ -67,13 +87,17 @@ define [
if tools.length != @get('num_plots')
continue
proxy = new _ToolProxy({tools: tools})
- @get('actions').push(proxy)
+ tmp = @get('actions')
+ tmp.push(proxy)
+ @set('actions', tmp)
for typ, tools of inspectors
if tools.length != @get('num_plots')
continue
proxy = new _ToolProxy({tools: tools})
- @get('inspectors').push(proxy)
+ tmp = @get('inspectors')
+ tmp.push(proxy)
+ @set('inspectors', tmp)
for et, info of @get('gestures')
tools = info.tools
diff --git a/bokehjs/src/coffee/common/toolbar_template.eco b/bokehjs/src/coffee/common/toolbar_template.eco
index 88140ace521..d9fa0dfc75a 100644
--- a/bokehjs/src/coffee/common/toolbar_template.eco
+++ b/bokehjs/src/coffee/common/toolbar_template.eco
@@ -13,9 +13,10 @@
<button class="bk-toolbar-button help" title="Help">
<img class="bk-btn-icon" src="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAGXRFWHRTb2Z0d2FyZQBBZG9iZSBJbWFnZVJlYWR5ccllPAAAA2hpVFh0WE1MOmNvbS5hZG9iZS54bXAAAAAAADw/eHBhY2tldCBiZWdpbj0i77u/IiBpZD0iVzVNME1wQ2VoaUh6cmVTek5UY3prYzlkIj8+IDx4OnhtcG1ldGEgeG1sbnM6eD0iYWRvYmU6bnM6bWV0YS8iIHg6eG1wdGs9IkFkb2JlIFhNUCBDb3JlIDUuMC1jMDYxIDY0LjE0MDk0OSwgMjAxMC8xMi8wNy0xMDo1NzowMSAgICAgICAgIj4gPHJkZjpSREYgeG1sbnM6cmRmPSJodHRwOi8vd3d3LnczLm9yZy8xOTk5LzAyLzIyLXJkZi1zeW50YXgtbnMjIj4gPHJkZjpEZXNjcmlwdGlvbiByZGY6YWJvdXQ9IiIgeG1sbnM6eG1wTU09Imh0dHA6Ly9ucy5hZG9iZS5jb20veGFwLzEuMC9tbS8iIHhtbG5zOnN0UmVmPSJodHRwOi8vbnMuYWRvYmUuY29tL3hhcC8xLjAvc1R5cGUvUmVzb3VyY2VSZWYjIiB4bWxuczp4bXA9Imh0dHA6Ly9ucy5hZG9iZS5jb20veGFwLzEuMC8iIHhtcE1NOk9yaWdpbmFsRG9jdW1lbnRJRD0ieG1wLmRpZDo3NzIwRUFGMDYyMjE2ODExOTdBNUNBNjVEQTY5OTRDRSIgeG1wTU06RG9jdW1lbnRJRD0ieG1wLmRpZDozMjFERDhDRjIwQjIxMUU0ODREQUYzNzM5QTM2MjBCRSIgeG1wTU06SW5zdGFuY2VJRD0ieG1wLmlpZDozMjFERDhDRTIwQjIxMUU0ODREQUYzNzM5QTM2MjBCRSIgeG1wOkNyZWF0b3JUb29sPSJBZG9iZSBQaG90b3Nob3AgQ1M1LjEgTWFjaW50b3NoIj4gPHhtcE1NOkRlcml2ZWRGcm9tIHN0UmVmOmluc3RhbmNlSUQ9InhtcC5paWQ6OTdFQUZCRjQ4NjIxNjgxMTk3QTVDQTY1REE2OTk0Q0UiIHN0UmVmOmRvY3VtZW50SUQ9InhtcC5kaWQ6NzcyMEVBRjA2MjIxNjgxMTk3QTVDQTY1REE2OTk0Q0UiLz4gPC9yZGY6RGVzY3JpcHRpb24+IDwvcmRmOlJERj4gPC94OnhtcG1ldGE+IDw/eHBhY2tldCBlbmQ9InIiPz6QBYrgAAABb0lEQVR42ozTwUeEQRjH8W3bKEs6JkpESqfVpS4RHaJL2b2sLepUh3To0KFVIjp0iegQUdG21WmVvUWsKBGRlIiU7Q+IVaT0ffi9eY33peFj7cw8zzvzzEzV7v5hxGltGEUvutR3hwvs4ck/OeoET6KACrJolaz6Cprz12L6rcEOajGEFyfxtRxhDX0Yx5e3ggV8IqlgW/4J3vCKddRrLKm5894KOjGCHiVrwDZ+sKKvpRQ0pzkzuETeEqSxpT1aa8ezCmbyuEW3b0sVxaQtQT+mfINXGPT9T+n3xqnLKTYsQQseI8FtWnstYdEZs9o0eqdQZxUNSWD9YwHj36i2UyijOWQFVvmPkOR2P8qW4AwDIQma0BEyZjGlqCo9gXjA1+0ePAQExxWTtwT3KOqG/bfZ3GOL9W7ikrIe6FSsvQdswcZymrvsf0xWpIzqYauZRcIXmFBfUUea8Qobc5a2qQtiz3nVec7nGHaf868AAwDKW1RIPmvhEQAAAABJRU5ErkJggg==">
<span class="tip">
- <span>For Pan tool, left click and drag.<br/><br/>
- For Wheel Zoom, hold shift and scroll or select the Wheel Zoom tool and scroll.<br/><br/></span>
- <a href="http://bokeh.pydata.org" target="_blank">Learn More</a>
+ <span>For Pan tool, left click and drag.
+ For Wheel Zoom, hold shift and scroll or select the Wheel Zoom tool and scroll.
+ </span>
+ <a href="http://bokeh.pydata.org" target="_blank">Learn More</a>
</span>
</button>
</li>
diff --git a/bokehjs/src/less/continuum.less b/bokehjs/src/less/continuum.less
index 02816a07e21..56da7c130fd 100644
--- a/bokehjs/src/less/continuum.less
+++ b/bokehjs/src/less/continuum.less
@@ -1,3 +1,5 @@
+/* table element cleanup */
+
.tableelem {
padding: 2px 10px;
border: 2px white;
@@ -104,6 +106,18 @@
.bk-button-bar .bk-bs-dropdown {
padding: 10px 10px 0 5px;
+ &:before {
+ content: "\25BE";
+ font-size: 90%;
+ color: lightgray;
+ display: inline-block;
+ height: 10px;
+ line-height: 10px;
+ width: 10px;
+ position: absolute;
+ right: 0px;
+ top: 10px;
+ }
a {
color: transparent;
font-size: 0;
@@ -111,7 +125,7 @@
float: left;
width: 16px;
height: 16px;
- margin: 10px 5px 5px 5px;
+ margin: 10px 5px 5px 3px;
background-image: url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAABx0RVh0U29mdHdhcmUAQWRvYmUgRmlyZXdvcmtzIENTNui8sowAAAHeSURBVDiNpZPPS1RRFMc/5z6FLLKlQQ1IQi0y0L/AsF+00rC6M3eQN61buDDatWoXuRBahs5zeM/uiCi0aTKE2ktSrttY0NoIFzpzWnTHxscQRF/4Ls453++5nHM5Qhcspv62GByqlwAjIp+0xXKlbDfyWukMqqkfAeZEGAe+Av2h9AM4p7CJMFsp2e22xxyZMz8hwpYIg8Dd2NkCsAasxc6eB0oCg6JsVTM/caxBkvphgXWgoeho7OxqqPcGEjv7StFRoCGwXk39MICpZT4CXgC7ik5VXHGvY6pTgQBUXHFP0SlUd0WYr2UrPaYJ1xHGgJmKK+7ndjQbSEeTfUQeAeNNWmOSpD5FuBY7eza/4b8hyfx3VBs9CBeBviTzS0AU6k9iZ7/kDBeApyFsAicQudz+Bf2X1zvxvyO8MSosAQNJ5u90EQ0lmR/qkr8PDKhIzUTwTuGDwvzLlXpfTjsXeISFev0k8FxhM8K8N9PONkV5KFDoPdDVBe/7O/Q/A3+b/fKZ6FBfAwVRZqbdvUMDEJftDqKTwK2oyXY19TZ4DgKppt6apvkIXEV0Mi7bHcgdU5L6ERWeCdwAvgGnQ6l9TBuiPI7Lf47pWIM2FjN/00AJuBJSn1uw/MDZt3ntLzC5tBIPCBGPAAAAAElFTkSuQmCC);
}
}
@@ -175,7 +189,6 @@
padding: 0 3px;
}
}
-
.bk-button-bar .bk-button-bar-list .bk-bs-dropdown-menu {
padding: 10px 8px;
}
@@ -242,9 +255,7 @@ li:hover .bk-toolbar-button span.tip {
-moz-border-radius: 3px !important;
-webkit-border-radius: 3px !important;
display: inline-block;
- //position: absolute;
position: relative;
- //top: 40px;
top: 25px;
padding: 3px 5px;
transition: all 0.6s ease;
@@ -255,6 +266,10 @@ li:hover .bk-toolbar-button span.tip {
display: block;
text-align: left;
}
+ span {
+ width: 200px;
+ white-space: normal;
+ }
}
.bk-button-bar-list .bk-toolbar-button.active {
@@ -265,6 +280,17 @@ li:hover .bk-toolbar-button span.tip {
outline: none !important;
border-bottom: 2px solid #26aae1;
}
+
+
+
+.bk-button-bar > .bk-toolbar-button.active {
+ border-bottom: 2px solid #26aae1;
+}
+
+
+
+/* css specific to toolbar orientation (above, below)*/
+
.bk-plot-above.bk-toolbar-active {
border-bottom: 2px solid #e5e5e5;
}
@@ -283,12 +309,29 @@ li:hover .bk-toolbar-button span.tip {
top: 5px;
.bk-button-bar-list {
float: left;
+ &.bk-bs-dropdown {
+ margin-right: 20px; //create room for dropdown chevron
+ &:before {
+ right: -6px;
+ }
+ &:after {
+ right: -12px;
+ position: absolute;
+ }
+ }
+ .bk-bs-dropdown-menu {
+ &:after {
+ content: "";
+ }
+ }
}
.bk-toolbar-button {
float: left;
&.help {
float: right;
span.tip {
+ right: 0;
+ text-align: left;
width: 200px;
white-space: normal;
> * {
@@ -318,14 +361,14 @@ li:hover .bk-toolbar-button span.tip {
}
}
-
+/* css specific to toolbar orientation (left, right)*/
.bk-plot-left.bk-toolbar-active{
border-right: 2px solid #e5e5e5;
}
.bk-plot-right.bk-toolbar-active {
border-left: 2px solid #e5e5e5;
}
-.bk-plot-left, .bk-plot-right {
+.bk-plot-left.bk-toolbar-active, .bk-plot-right.bk-toolbar-active {
display: block;
margin: 45px 0 0 0;
.bk-logo {
@@ -341,11 +384,33 @@ li:hover .bk-toolbar-button span.tip {
clear: both;
}
.bk-button-bar-list {
- li {
+ &:after {
+ content: " ";
+ height: 0px;
+ display: block;
+ clear: both;
+ }
+ &.bk-bs-dropdown {
+ &:before {
+ top: ;
+ }
+ &:after {
+ content: " \2014";
+ float: none;
+ clear: both;
+ display: block;
+ width: 30px;
+ height: 8px;
+ line-height: 8px;
+ padding: 3px 0;
+ text-align: center;
+ }
+ }
+ > li {
clear: both;
&:last-child {
&:after {
- content: "-";
+ content: " \2014";
float: none;
clear: both;
display: block;
@@ -356,40 +421,40 @@ li:hover .bk-toolbar-button span.tip {
text-align: center;
}
}
+
.bk-toolbar-button {
&.active {
border-bottom: none;
border-right: 2px solid #26aae1;
}
- span.tip {
- top: 4px;
- left: 40px;
- padding: 5px 10px 5px 10px;
- &:before {
- top: 2px;
- left: -19px;
- width: 9px;
- height: 15px;
- background-image: url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAkAAAAPCAMAAAABFhU/AAAAA3NCSVQICAjb4U/gAAAAY1BMVEX////////8/Pz5+fn39/f19fX09PTv8fHv7+/t7e7s7Ozp6enn6Onm5ubj4+Ph4eHf39/X2drW1tfMzMzAw8S+wMGusbKorK6orK2nq6ufo6WcoaGYnZ+RlpiJj5GGjI6Bh4n1ho2QAAAAIXRSTlMA//////////////////////////////////////////9G9E6kAAAACXBIWXMAAAsSAAALEgHS3X78AAAAHHRFWHRTb2Z0d2FyZQBBZG9iZSBGaXJld29ya3MgQ1M26LyyjAAAABR0RVh0Q3JlYXRpb24gVGltZQA5LzUvMTTY+fXxAAAAUklEQVQImTXN2xZAIABE0VQUIfdLwvz/V1rL1DztpzOi4EoIQoekNoIaH1AL8EvvoExEUkBWfWZZvyWVzq/vL6kbP9/sKdtPF8vKdMPBN1m5AR+0BAnD6uP50QAAAABJRU5ErkJggg==);
+ &.help {
+ span.tip {
+ &:before {
+ left: -57%;
+ }
}
}
+ span.tip {
+ position: absolute;
+ top: 4px;
+ left: 40px;
+ padding: 5px 10px 5px 10px;
+ &:before {
+ top: 2px;
+ left: -19px;
+ width: 9px;
+ height: 15px;
+ background-image: url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAkAAAAPCAMAAAABFhU/AAAAA3NCSVQICAjb4U/gAAAAY1BMVEX////////8/Pz5+fn39/f19fX09PTv8fHv7+/t7e7s7Ozp6enn6Onm5ubj4+Ph4eHf39/X2drW1tfMzMzAw8S+wMGusbKorK6orK2nq6ufo6WcoaGYnZ+RlpiJj5GGjI6Bh4n1ho2QAAAAIXRSTlMA//////////////////////////////////////////9G9E6kAAAACXBIWXMAAAsSAAALEgHS3X78AAAAHHRFWHRTb2Z0d2FyZQBBZG9iZSBGaXJld29ya3MgQ1M26LyyjAAAABR0RVh0Q3JlYXRpb24gVGltZQA5LzUvMTTY+fXxAAAAUklEQVQImTXN2xZAIABE0VQUIfdLwvz/V1rL1DztpzOi4EoIQoekNoIaH1AL8EvvoExEUkBWfWZZvyWVzq/vL6kbP9/sKdtPF8vKdMPBN1m5AR+0BAnD6uP50QAAAABJRU5ErkJggg==);
+ }
+ }
+
}
}
+
}
}
}
-.bk-toolbar-button.help {
- > span.tip {
- right: 0;
- text-align: left;
- }
-}
-
-.bk-button-bar > .bk-toolbar-button.active {
- border-bottom: 2px solid #26aae1;
-}
-
.bk-crossfilter-selector {
list-style-type: none;
-webkit-padding-start: 0
diff --git a/examples/plotting/file/grid.py b/examples/plotting/file/grid.py
index 4cd4fb40ce6..1f1f61e734d 100644
--- a/examples/plotting/file/grid.py
+++ b/examples/plotting/file/grid.py
@@ -7,7 +7,7 @@
x = np.linspace(0, 4*np.pi, N)
y = np.sin(x)
-TOOLS = "pan,wheel_zoom,box_zoom,reset,save"
+TOOLS = "pan,wheel_zoom,box_zoom,reset,save,crosshair"
l = figure(title="line", tools=TOOLS)
l.line(x,y, line_width=3, color="gold")
|
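The CoffeeScript fix above groups identical tools from every subplot behind a single `_ToolProxy`, and re-`set()`s the `actions`/`inspectors` arrays instead of pushing into the array returned by `get()`, presumably so Backbone registers the change. A rough Python analogue of the proxy idea, purely illustrative (not Bokeh API):

```
# Rough Python analogue of _ToolProxy (illustrative, not Bokeh API): one
# toolbar entry fans attribute changes and actions out to every subplot.
class ToolProxy:
    def __init__(self, tools):
        self.tools = tools  # the same tool type, one instance per subplot

    def set(self, attr, value):
        for tool in self.tools:   # mirror e.g. 'active' to all plots
            setattr(tool, attr, value)

    def do(self):
        for tool in self.tools:   # forward action tools (reset, save, ...)
            tool.do()
```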
frappe__frappe-4871 | `Insufficient Permission for Chat Profile` on load
Use Case:
1. Create a new user without the System Manager role
2. Log in to the system
3. An error pops up when the system tries to create the Chat Profile.
I tried replicating it with a user that once had the System Manager role, and the error did not pop up. That was because the user already had a Chat Profile.
Here is the log. Hope this helps!

Any help is greatly appreciated. I will also try to create an issue for this here in discuss.
Thanks,
Dori
| [
{
"content": "# imports - module imports\nfrom frappe.model.document import Document\nfrom frappe import _\nimport frappe\n\n# imports - frappe module imports\nfrom frappe.core.doctype.version.version import get_diff\nfrom frappe.chat.doctype.chat_room import chat_room\nfrom frappe.chat.util import (\n safe_json_loads,\n filter_dict,\n dictify\n)\n\nsession = frappe.session\n\nclass ChatProfile(Document):\n def before_save(self):\n if not self.is_new():\n self.get_doc_before_save()\n\n def on_update(self):\n if not self.is_new():\n b, a = self.get_doc_before_save(), self\n diff = dictify(get_diff(a, b))\n if diff:\n user = session.user\n\n fields = [changed[0] for changed in diff.changed]\n\n if 'status' in fields:\n rooms = chat_room.get(user, filters = ['Chat Room', 'type', '=', 'Direct'])\n update = dict(user = user, data = dict(status = self.status))\n\n for room in rooms:\n frappe.publish_realtime('frappe.chat.profile:update', update, room = room.name, after_commit = True)\n\n if 'enable_chat' in fields:\n update = dict(user = user, data = dict(enable_chat = bool(self.enable_chat)))\n frappe.publish_realtime('frappe.chat.profile:update', update, user = user, after_commit = True)\n\ndef authenticate(user):\n if user != session.user:\n frappe.throw(_(\"Sorry, you're not authorized.\"))\n\[email protected]()\ndef get(user, fields = None):\n duser = frappe.get_doc('User', user)\n dprof = frappe.get_doc('Chat Profile', user)\n\n # If you're adding something here, make sure the client recieves it.\n profile = dict(\n # User\n name = duser.name,\n email = duser.email,\n first_name = duser.first_name,\n last_name = duser.last_name,\n username = duser.username,\n avatar = duser.user_image,\n bio = duser.bio,\n # Chat Profile\n status = dprof.status,\n chat_background = dprof.chat_background,\n message_preview = bool(dprof.message_preview),\n notification_tones = bool(dprof.notification_tones),\n conversation_tones = bool(dprof.conversation_tones),\n enable_chat = bool(dprof.enable_chat)\n )\n profile = filter_dict(profile, fields)\n\n return dictify(profile)\n\[email protected]()\ndef create(user, exists_ok = False, fields = None):\n authenticate(user)\n\n exists_ok, fields = safe_json_loads(exists_ok, fields)\n\n if frappe.db.exists('Chat Profile', user):\n if not exists_ok:\n frappe.throw(_('Chat Profile for User {user} exists.'.format(user = user)))\n else:\n dprof = frappe.new_doc('Chat Profile')\n dprof.user = user\n dprof.save()\n\n profile = get(user, fields = fields)\n\n return profile\n\[email protected]()\ndef update(user, data):\n authenticate(user)\n\n data = safe_json_loads(data)\n\n dprof = frappe.get_doc('Chat Profile', user)\n dprof.update(data)\n dprof.save(ignore_permissions = True)",
"path": "frappe/chat/doctype/chat_profile/chat_profile.py"
}
] | [
{
"content": "# imports - module imports\nfrom frappe.model.document import Document\nfrom frappe import _\nimport frappe\n\n# imports - frappe module imports\nfrom frappe.core.doctype.version.version import get_diff\nfrom frappe.chat.doctype.chat_room import chat_room\nfrom frappe.chat.util import (\n safe_json_loads,\n filter_dict,\n dictify\n)\n\nsession = frappe.session\n\nclass ChatProfile(Document):\n def before_save(self):\n if not self.is_new():\n self.get_doc_before_save()\n\n def on_update(self):\n if not self.is_new():\n b, a = self.get_doc_before_save(), self\n diff = dictify(get_diff(a, b))\n if diff:\n user = session.user\n\n fields = [changed[0] for changed in diff.changed]\n\n if 'status' in fields:\n rooms = chat_room.get(user, filters = ['Chat Room', 'type', '=', 'Direct'])\n update = dict(user = user, data = dict(status = self.status))\n\n for room in rooms:\n frappe.publish_realtime('frappe.chat.profile:update', update, room = room.name, after_commit = True)\n\n if 'enable_chat' in fields:\n update = dict(user = user, data = dict(enable_chat = bool(self.enable_chat)))\n frappe.publish_realtime('frappe.chat.profile:update', update, user = user, after_commit = True)\n\ndef authenticate(user):\n if user != session.user:\n frappe.throw(_(\"Sorry, you're not authorized.\"))\n\[email protected]()\ndef get(user, fields = None):\n duser = frappe.get_doc('User', user)\n dprof = frappe.get_doc('Chat Profile', user)\n\n # If you're adding something here, make sure the client recieves it.\n profile = dict(\n # User\n name = duser.name,\n email = duser.email,\n first_name = duser.first_name,\n last_name = duser.last_name,\n username = duser.username,\n avatar = duser.user_image,\n bio = duser.bio,\n # Chat Profile\n status = dprof.status,\n chat_background = dprof.chat_background,\n message_preview = bool(dprof.message_preview),\n notification_tones = bool(dprof.notification_tones),\n conversation_tones = bool(dprof.conversation_tones),\n enable_chat = bool(dprof.enable_chat)\n )\n profile = filter_dict(profile, fields)\n\n return dictify(profile)\n\[email protected]()\ndef create(user, exists_ok = False, fields = None):\n authenticate(user)\n\n exists_ok, fields = safe_json_loads(exists_ok, fields)\n\n if frappe.db.exists('Chat Profile', user):\n if not exists_ok:\n frappe.throw(_('Chat Profile for User {user} exists.'.format(user = user)))\n else:\n dprof = frappe.new_doc('Chat Profile')\n dprof.user = user\n dprof.save(ignore_permissions = True)\n\n profile = get(user, fields = fields)\n\n return profile\n\[email protected]()\ndef update(user, data):\n authenticate(user)\n\n data = safe_json_loads(data)\n\n dprof = frappe.get_doc('Chat Profile', user)\n dprof.update(data)\n dprof.save(ignore_permissions = True)",
"path": "frappe/chat/doctype/chat_profile/chat_profile.py"
}
] | diff --git a/frappe/chat/doctype/chat_profile/chat_profile.py b/frappe/chat/doctype/chat_profile/chat_profile.py
index d418caa7dead..b876204bef38 100644
--- a/frappe/chat/doctype/chat_profile/chat_profile.py
+++ b/frappe/chat/doctype/chat_profile/chat_profile.py
@@ -82,7 +82,7 @@ def create(user, exists_ok = False, fields = None):
else:
dprof = frappe.new_doc('Chat Profile')
dprof.user = user
- dprof.save()
+ dprof.save(ignore_permissions = True)
profile = get(user, fields = fields)
|
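The one-line fix passes `ignore_permissions=True` to `Document.save()`, so the Chat Profile can be created for users who lack write permission on that doctype; the `authenticate()` guard already restricts creation to the user's own profile. A minimal sketch of the pattern (`ignore_permissions` is a real frappe flag; the names mirror the patched code):

```
import frappe

# Sketch: create a server-side document regardless of the caller's role
# permissions, after an explicit identity check.
def create_chat_profile(user):
    if user != frappe.session.user:
        frappe.throw("Sorry, you're not authorized.")
    dprof = frappe.new_doc('Chat Profile')
    dprof.user = user
    dprof.save(ignore_permissions=True)  # the fix: bypass the permission check
    return dprof
```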
getmoto__moto-1400 | mock_xray_client cannot be used as a context manager
PR #1255 added support for `aws_xray_sdk` which is great.
But there is a problem with it: `moto.mock_xray_client` is *only* a function decorator, and unlike all other `mock_*` methods it cannot be used as a context manager or directly with `start()`...`stop()`.
As a result, it is not possible to write a `py.test` fixture which would add support for mocking `xray_client`.
Also, `mock_xray_client` does not return the result of the function it decorates. Given it is meant to be used to decorate test functions it is most likely not a big issue, but I think it is still worth fixing.
I will prepare a PR for the return value issue soon.
Also I am thinking about refactoring `mock_xray_client` to base it on the existing infrastructure (`BaseBackend`, `base_decorator`), but am not yet familiar enough with `moto` internals to be sure of the best way to implement it.
Installed version: `moto-ext==1.1.25`
The problem seemingly persists in current `master` branch.
| [
{
"content": "from functools import wraps\nimport os\nfrom moto.xray import xray_backends\nimport aws_xray_sdk.core\nfrom aws_xray_sdk.core.context import Context as AWSContext\nfrom aws_xray_sdk.core.emitters.udp_emitter import UDPEmitter\n\n\nclass MockEmitter(UDPEmitter):\n \"\"\"\n Replaces the code that sends UDP to local X-Ray daemon\n \"\"\"\n def __init__(self, daemon_address='127.0.0.1:2000'):\n address = os.getenv('AWS_XRAY_DAEMON_ADDRESS_YEAH_NOT_TODAY_MATE', daemon_address)\n self._ip, self._port = self._parse_address(address)\n\n def _xray_backend(self, region):\n return xray_backends[region]\n\n def send_entity(self, entity):\n # Hack to get region\n # region = entity.subsegments[0].aws['region']\n # xray = self._xray_backend(region)\n\n # TODO store X-Ray data, pretty sure X-Ray needs refactor for this\n pass\n\n def _send_data(self, data):\n raise RuntimeError('Should not be running this')\n\n\ndef mock_xray_client(f):\n \"\"\"\n Mocks the X-Ray sdk by pwning its evil singleton with our methods\n\n The X-Ray SDK has normally been imported and `patched()` called long before we start mocking.\n This means the Context() will be very unhappy if an env var isnt present, so we set that, save\n the old context, then supply our new context.\n We also patch the Emitter by subclassing the UDPEmitter class replacing its methods and pushing\n that itno the recorder instance.\n \"\"\"\n @wraps(f)\n def _wrapped(*args, **kwargs):\n print(\"Starting X-Ray Patch\")\n\n old_xray_context_var = os.environ.get('AWS_XRAY_CONTEXT_MISSING')\n os.environ['AWS_XRAY_CONTEXT_MISSING'] = 'LOG_ERROR'\n old_xray_context = aws_xray_sdk.core.xray_recorder._context\n old_xray_emitter = aws_xray_sdk.core.xray_recorder._emitter\n aws_xray_sdk.core.xray_recorder._context = AWSContext()\n aws_xray_sdk.core.xray_recorder._emitter = MockEmitter()\n\n try:\n f(*args, **kwargs)\n finally:\n\n if old_xray_context_var is None:\n del os.environ['AWS_XRAY_CONTEXT_MISSING']\n else:\n os.environ['AWS_XRAY_CONTEXT_MISSING'] = old_xray_context_var\n\n aws_xray_sdk.core.xray_recorder._emitter = old_xray_emitter\n aws_xray_sdk.core.xray_recorder._context = old_xray_context\n\n return _wrapped\n\n\nclass XRaySegment(object):\n \"\"\"\n XRay is request oriented, when a request comes in, normally middleware like django (or automatically in lambda) will mark\n the start of a segment, this stay open during the lifetime of the request. During that time subsegments may be generated\n by calling other SDK aware services or using some boto functions. Once the request is finished, middleware will also stop\n the segment, thus causing it to be emitted via UDP.\n\n During testing we're going to have to control the start and end of a segment via context managers.\n \"\"\"\n def __enter__(self):\n aws_xray_sdk.core.xray_recorder.begin_segment(name='moto_mock', traceid=None, parent_id=None, sampling=1)\n\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n aws_xray_sdk.core.xray_recorder.end_segment()\n",
"path": "moto/xray/mock_client.py"
}
] | [
{
"content": "from functools import wraps\nimport os\nfrom moto.xray import xray_backends\nimport aws_xray_sdk.core\nfrom aws_xray_sdk.core.context import Context as AWSContext\nfrom aws_xray_sdk.core.emitters.udp_emitter import UDPEmitter\n\n\nclass MockEmitter(UDPEmitter):\n \"\"\"\n Replaces the code that sends UDP to local X-Ray daemon\n \"\"\"\n def __init__(self, daemon_address='127.0.0.1:2000'):\n address = os.getenv('AWS_XRAY_DAEMON_ADDRESS_YEAH_NOT_TODAY_MATE', daemon_address)\n self._ip, self._port = self._parse_address(address)\n\n def _xray_backend(self, region):\n return xray_backends[region]\n\n def send_entity(self, entity):\n # Hack to get region\n # region = entity.subsegments[0].aws['region']\n # xray = self._xray_backend(region)\n\n # TODO store X-Ray data, pretty sure X-Ray needs refactor for this\n pass\n\n def _send_data(self, data):\n raise RuntimeError('Should not be running this')\n\n\ndef mock_xray_client(f):\n \"\"\"\n Mocks the X-Ray sdk by pwning its evil singleton with our methods\n\n The X-Ray SDK has normally been imported and `patched()` called long before we start mocking.\n This means the Context() will be very unhappy if an env var isnt present, so we set that, save\n the old context, then supply our new context.\n We also patch the Emitter by subclassing the UDPEmitter class replacing its methods and pushing\n that itno the recorder instance.\n \"\"\"\n @wraps(f)\n def _wrapped(*args, **kwargs):\n print(\"Starting X-Ray Patch\")\n\n old_xray_context_var = os.environ.get('AWS_XRAY_CONTEXT_MISSING')\n os.environ['AWS_XRAY_CONTEXT_MISSING'] = 'LOG_ERROR'\n old_xray_context = aws_xray_sdk.core.xray_recorder._context\n old_xray_emitter = aws_xray_sdk.core.xray_recorder._emitter\n aws_xray_sdk.core.xray_recorder._context = AWSContext()\n aws_xray_sdk.core.xray_recorder._emitter = MockEmitter()\n\n try:\n return f(*args, **kwargs)\n finally:\n\n if old_xray_context_var is None:\n del os.environ['AWS_XRAY_CONTEXT_MISSING']\n else:\n os.environ['AWS_XRAY_CONTEXT_MISSING'] = old_xray_context_var\n\n aws_xray_sdk.core.xray_recorder._emitter = old_xray_emitter\n aws_xray_sdk.core.xray_recorder._context = old_xray_context\n\n return _wrapped\n\n\nclass XRaySegment(object):\n \"\"\"\n XRay is request oriented, when a request comes in, normally middleware like django (or automatically in lambda) will mark\n the start of a segment, this stay open during the lifetime of the request. During that time subsegments may be generated\n by calling other SDK aware services or using some boto functions. Once the request is finished, middleware will also stop\n the segment, thus causing it to be emitted via UDP.\n\n During testing we're going to have to control the start and end of a segment via context managers.\n \"\"\"\n def __enter__(self):\n aws_xray_sdk.core.xray_recorder.begin_segment(name='moto_mock', traceid=None, parent_id=None, sampling=1)\n\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n aws_xray_sdk.core.xray_recorder.end_segment()\n",
"path": "moto/xray/mock_client.py"
}
] | diff --git a/moto/xray/mock_client.py b/moto/xray/mock_client.py
index 6e2164d6378a..135796054a68 100644
--- a/moto/xray/mock_client.py
+++ b/moto/xray/mock_client.py
@@ -51,7 +51,7 @@ def _wrapped(*args, **kwargs):
aws_xray_sdk.core.xray_recorder._emitter = MockEmitter()
try:
- f(*args, **kwargs)
+ return f(*args, **kwargs)
finally:
if old_xray_context_var is None:
|
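The patch above is the general decorator rule: a wrapper around a test function must `return` the wrapped call, not merely invoke it, or the result is silently dropped. A distilled sketch of the pattern (hypothetical `setup`/`teardown` callables, not moto API):

```
from functools import wraps

# Distilled sketch of the fix: propagate the decorated function's return
# value while still restoring state in `finally`.
def patching_decorator(setup, teardown):
    def decorator(f):
        @wraps(f)
        def wrapped(*args, **kwargs):
            setup()
            try:
                return f(*args, **kwargs)  # `return`, not a bare call
            finally:
                teardown()
        return wrapped
    return decorator
```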
google__clusterfuzz-2844 | add libgcc_s.so.1 to stacktrace ignore
e.g. https://clusterfuzz.com/testcase-detail/6316990573379584
| [
{
"content": "# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Stack parsing constants.\"\"\"\n\nimport re\n\nfrom clusterfuzz._internal.crash_analysis.stack_parsing import stack_parser\n\nC_CPP_EXTENSIONS = ['c', 'cc', 'cpp', 'cxx', 'h', 'hh', 'hpp', 'hxx']\n\n# Patterns which cannot be compiled directly, or which are used for direct\n# comparison.\nCHECK_FAILURE_PATTERN = r'Check failed: '\nJNI_ERROR_STRING = r'JNI DETECTED ERROR IN APPLICATION:'\n\n# Common log prefix format for Google fatal logs.\nGOOGLE_LOG_FATAL_PREFIX = r'^F\\d{4}\\s+\\d{2}:\\d{2}:\\d{2}\\.\\d+\\s+\\d+\\s+(.*):\\d+\\]'\n\n# Compiled regular expressions.\nANDROID_ABORT_REGEX = re.compile(r'^Abort message: (.*)')\nANDROID_FATAL_EXCEPTION_REGEX = re.compile(r'.*FATAL EXCEPTION.*:')\nANDROID_KERNEL_ERROR_REGEX = re.compile(\n r'.*Internal error: (Oops)?( -|:) (BUG|[0-9a-fA-F]+)')\nANDROID_KERNEL_STACK_FRAME_REGEX = re.compile(\n # e.g. \"[ 1998.156940] [<c0667574>] \"\n r'[^(]*\\[\\<([x0-9a-fA-F]+)\\>\\]\\s+'\n # e.g. \"(msm_vidc_prepare_buf+0xa0/0x124)\"; function (3), offset (4)\n r'\\(?(([\\w]+)\\+([\\w]+)/[\\w]+)\\)?')\nANDROID_KERNEL_STACK_FRAME_NO_ADDRESS_REGEX = re.compile(\n # e.g. \"(msm_vidc_prepare_buf+0xa0/0x124)\"; function (2), offset (3)\n r'\\(?(([\\w]+)\\+([\\w]+)/[\\w]+)\\)?')\nANDROID_KERNEL_TIME_REGEX = re.compile(r'^\\[\\s*\\d+\\.\\d+\\]\\s')\n# Parentheses are optional.\nANDROID_PROCESS_NAME_REGEX = re.compile(r'.*[(](.*)[)]$')\nANDROID_SEGV_REGEX = re.compile(r'.*signal.*\\(SIG.*fault addr ([^ ]*)(.*)')\nASAN_INVALID_FREE_REGEX = re.compile(\n r'.*AddressSanitizer: '\n r'attempting free on address which was not malloc\\(\\)-ed: '\n r'([xX0-9a-fA-F]+)')\nASAN_DOUBLE_FREE_REGEX = re.compile(\n r'.*(AddressSanitizer).*double-free'\n r' on (unknown address |address |)([xX0-9a-fA-F]+)')\nASAN_MEMCPY_OVERLAP_REGEX = re.compile(\n r'.*(AddressSanitizer).*memcpy-param-overlap'\n r'[^\\[]*([\\[].*[)])')\nASAN_REGEX = re.compile(\n r'.*ERROR: (HWAddressSanitizer|AddressSanitizer)[: ]*[ ]*([^(:;]+)')\nASSERT_REGEX = re.compile(\n r'(?:\\[.*?\\]|.*\\.(?:%s):.*)?' % ('|'.join(C_CPP_EXTENSIONS)) +\n r'\\s*(?:ASSERT(?:ION)? FAIL(?:URE|ED)|panic): (.*)', re.IGNORECASE)\nASSERT_REGEX_GOOGLE = re.compile(GOOGLE_LOG_FATAL_PREFIX +\n r'.*assertion failed at\\s.*\\sin\\s*.*: (.*)')\nASSERT_REGEX_GLIBC = re.compile(\n r'.*:\\s*assertion [`\\'\"]?(.*?)[`\\'\"]? 
failed\\.?$', re.IGNORECASE)\nASSERT_NOT_REACHED_REGEX = re.compile(r'^\\s*SHOULD NEVER BE REACHED\\s*$')\nCENTIPEDE_TIMEOUT_REGEX = re.compile(\n r'^========= Timeout of \\d+ seconds exceeded; exiting')\nCFI_ERROR_REGEX = re.compile(\n r'(.*): runtime error: control flow integrity check for type (.*) '\n r'failed during (.*vtable address ([xX0-9a-fA-F]+)|.*)')\nCFI_INVALID_DOWNCAST_REGEX = re.compile(r'.*note: vtable is of type (.*)')\nCFI_INVALID_VPTR_REGEX = re.compile(r'.*note: invalid vtable$')\nCFI_FUNC_DEFINED_HERE_REGEX = re.compile(r'.*note: .* defined here$')\nCFI_NODEBUG_ERROR_MARKER_REGEX = re.compile(\n r'CFI: Most likely a control flow integrity violation;.*')\nCHROME_CHECK_FAILURE_REGEX = re.compile(\n r'\\s*\\[[^\\]]*[:]([^\\](]*).*\\].*Check failed[:]\\s*(.*)')\nCHROME_STACK_FRAME_REGEX = re.compile(\n r'[ ]*(#(?P<frame_id>[0-9]+)[ ]' # frame id (2)\n r'([xX0-9a-fA-F]+)[ ])' # addr (3)\n r'([^/\\\\]+)$') # rest, usually fun (4); may have off\nCHROME_WIN_STACK_FRAME_REGEX = re.compile(\n r'[ ]*([^/\\\\]+) ' # fun (1)\n r'\\[([xX0-9a-fA-F]+)\\+' # fun_base (2)\n r'(\\d+)\\]' # off[dec] (3)\n r'( \\((.*):(\\d+)\\))?') # if available, file (5) and line (6)\nCHROME_MAC_STACK_FRAME_REGEX = re.compile(\n r'(?P<frame_id>\\d+)\\s+' # frame id (1)\n r'(([\\w ]+)|(\\?\\?\\?))\\s+' # image (2)\n r'([xX0-9a-fA-F]+)\\s+' # addr[hex] (5)\n r'([^/\\\\]+)\\s*\\+\\s*' # fun (6)\n r'(\\d+)') # off[dec] (7)\nMSAN_TSAN_REGEX = re.compile(\n r'.*(ThreadSanitizer|MemorySanitizer):\\s+(?!ABRT)(?!ILL)([^(:]+)')\nEXTRA_SANITIZERS_COMMAND_INJECTION_REGEX = re.compile(\n r'===BUG DETECTED: Shell (corruption|injection)===')\nEXTRA_SANITIZERS_ARBITRARY_FILE_OPEN_REGEX = re.compile(\n r'===BUG DETECTED: Arbitrary file open===')\nFATAL_ERROR_GENERIC_FAILURE = re.compile(r'#\\s+()(.*)')\nFATAL_ERROR_CHECK_FAILURE = re.compile(\n r'#\\s+(Check failed: |RepresentationChangerError: node #\\d+:)(.*)')\nFATAL_ERROR_DCHECK_FAILURE = re.compile(r'#\\s+(Debug check failed: )(.*)')\nFATAL_ERROR_REGEX = re.compile(r'#\\s*Fatal error in (.*)')\nFATAL_ERROR_LINE_REGEX = re.compile(r'#\\s*Fatal error in (.*), line [0-9]+')\nFATAL_ERROR_UNREACHABLE = re.compile(r'# un(reachable|implemented) code')\nGENERIC_SEGV_HANDLER_REGEX = re.compile(\n 'Received signal 11 SEGV_[A-Z]+ ([0-9a-f]*)')\nGOOGLE_CHECK_FAILURE_REGEX = re.compile(GOOGLE_LOG_FATAL_PREFIX +\n r'\\s*Check failed[:]\\s*(.*)')\nGOOGLE_LOG_FATAL_REGEX = re.compile(GOOGLE_LOG_FATAL_PREFIX + r'\\s*(.*)')\nGPU_PROCESS_FAILURE = re.compile(r'.*GPU process exited unexpectedly.*')\nHWASAN_ALLOCATION_TAIL_OVERWRITTEN_ADDRESS_REGEX = re.compile(\n r'.*ERROR: HWAddressSanitizer: allocation-tail-overwritten; '\n r'heap object \\[([xX0-9a-fA-F]+),.*of size')\nJAZZER_JAVA_SECURITY_EXCEPTION_REGEX = re.compile(\n '== Java Exception: .*FuzzerSecurityIssue')\nJAZZER_JAVA_EXCEPTION_REGEX = re.compile('== Java Exception: .*')\nJAVA_EXCEPTION_CRASH_STATE_REGEX = re.compile(r'\\s*at (.*)\\(.*\\)')\nKERNEL_BUG = re.compile(r'kernel BUG at (.*)')\nKASAN_ACCESS_TYPE_REGEX = re.compile(r'(Read|Write) of size ([0-9]+)')\nKASAN_ACCESS_TYPE_ADDRESS_REGEX = re.compile(\n r'(Read|Write) of size ([0-9]+) at (addr|address) ([a-f0-9]+)')\nKASAN_CRASH_TYPE_ADDRESS_REGEX = re.compile(\n r'BUG: KASAN: (.*) (in|on).*(addr|address) ([a-f0-9]+)')\nKASAN_CRASH_TYPE_ADDRESS_RANGE_REGEX = re.compile(\n r'KASAN: (.*?) 
(in|on) range \\[([a-z0-9]+)-([a-z0-9]+)\\]')\nKASAN_CRASH_TYPE_FUNCTION_REGEX = re.compile(\n r'BUG: KASAN: (.*) (in|on).* ([\\w]+)\\+([\\w]+)\\/([\\w]+)')\nKASAN_GPF_REGEX = re.compile(r'general protection fault:.*KASAN')\nKERNEL_PANIC = re.compile(r'Kernel panic - not syncing: (.*)')\nLIBFUZZER_DEADLY_SIGNAL_REGEX = re.compile(\n r'.*ERROR:\\s*libFuzzer:\\s*deadly signal')\nLIBFUZZER_FUZZ_TARGET_EXITED_REGEX = re.compile(\n r'.*ERROR:\\s*libFuzzer:\\s*fuzz target exited')\nLIBFUZZER_OVERWRITES_CONST_INPUT_REGEX = re.compile(\n r'.*ERROR:\\s*libFuzzer:\\s*fuzz target overwrites its const input')\nLIBFUZZER_TIMEOUT_REGEX = re.compile(r'.*ERROR:\\s*libFuzzer:\\s*timeout')\nLIBRARY_NOT_FOUND_ANDROID_REGEX = re.compile(\n r'.*: library ([`\\'\"])(.*)\\1 not found')\nLIBRARY_NOT_FOUND_LINUX_REGEX = re.compile(\n r'.*error while loading shared libraries: ([^:]*): '\n r'cannot open shared object file')\nLINUX_GDB_CRASH_TYPE_REGEX = re.compile(r'Program received signal ([a-zA-Z]+),')\nLINUX_GDB_CRASH_ADDRESS_REGEX = re.compile(r'rip[ ]+([xX0-9a-fA-F]+)')\nLINUX_GDB_CRASH_ADDRESS_NO_REGISTERS_REGEX = re.compile(\n r'^(0[xX][0-9a-fA-F]+)\\s+in\\s+')\nLSAN_DIRECT_LEAK_REGEX = re.compile(r'Direct leak of ')\nLSAN_INDIRECT_LEAK_REGEX = re.compile(r'Indirect leak of ')\nMAC_GDB_CRASH_ADDRESS_REGEX = re.compile(\n r'Reason:.*at address[^0-9]*([0-9a-zA-Z]+)')\nOUT_OF_MEMORY_REGEX = re.compile(r'.*(?:%s).*' % '|'.join([\n r'# Allocation failed.*out of memory',\n r'::OnNoMemory',\n r'ERROR.*Sanitizer failed to allocate',\n r'FatalProcessOutOfMemory', # V8\n r'Fatal (?:process|JavaScript) out of memory:', # V8\n r'Fatal JavaScript invalid size error', # V8\n r'FX_OutOfMemoryTerminate',\n r'Out of memory\\. Dying.',\n r'Out of memory\\. size=',\n r'Sanitizer: allocation-size-too-big',\n r'Sanitizer: calloc-overflow',\n r'Sanitizer: calloc parameters overflow',\n r'Sanitizer: requested allocation size.*exceeds maximum supported size',\n r'Sanitizer: out of memory',\n r'TerminateBecauseOutOfMemory',\n r'allocator is out of memory trying to allocate',\n r'blinkGCOutOfMemory',\n r'couldnt allocate.*Out of memory',\n r'libFuzzer: out-of-memory \\(',\n r'rss limit exhausted',\n r'in rust_oom',\n r'Failure description: out-of-memory', # Centipede.\n]))\nRUNTIME_ERROR_REGEX = re.compile(r'#\\s*Runtime error in (.*)')\nRUNTIME_ERROR_LINE_REGEX = re.compile(r'#\\s*Runtime error in (.*), line [0-9]+')\nRUST_ASSERT_REGEX = re.compile(r'thread\\s.*\\spanicked at \\'([^\\']*)',\n re.IGNORECASE)\nSAN_ABRT_REGEX = re.compile(r'.*[a-zA-Z]+Sanitizer: ABRT ')\nSAN_BREAKPOINT_REGEX = re.compile(r'.*[a-zA-Z]+Sanitizer: breakpoint ')\nSAN_CHECK_FAILURE_REGEX = re.compile(\n r'.*Sanitizer CHECK failed[:]\\s*[^ ]*\\s*(.*)')\nSAN_CRASH_TYPE_ADDRESS_REGEX = re.compile(\n r'[ ]*([^ ]*|Atomic [^ ]*) of size ([^ ]*) at ([^ ]*)')\nSAN_DEADLYSIGNAL_REGEX = re.compile(r'.*:DEADLYSIGNAL')\nSAN_FPE_REGEX = re.compile(r'.*[a-zA-Z]+Sanitizer: FPE ')\nSAN_ILL_REGEX = re.compile(r'.*[a-zA-Z]+Sanitizer: ILL ')\nSAN_TRAP_REGEX = re.compile(r'.*[a-zA-Z]+Sanitizer: TRAP ')\nSAN_SEGV_CRASH_TYPE_REGEX = re.compile(\n r'.*The signal is caused by a ([A-Z]+) memory access.')\n# FIXME: Replace when better ways to check signal crashes are available.\nSAN_SIGNAL_REGEX = re.compile(r'.*SCARINESS: (\\d+) \\(signal\\)', re.DOTALL)\nSAN_STACK_FRAME_REGEX = re.compile(\n # frame id (1)\n r'\\s*#(?P<frame_id>\\d+)\\s+'\n # addr (2)\n r'([xX0-9a-fA-F]+)\\s+'\n # Format is [in {fun}[+{off}]] [{file}[:{line}[:{char}]]] [({mod}[+{off}])]\n # If there is fun and 
mod/file info, extract\n # fun+off, where fun (7, 5, 23), off (8)\n r'((in\\s*(((.*)\\+([xX0-9a-fA-F]+))|(.*)) '\n r'('\n # file:line:char, where file (12, 16), line (13, 17), char (14)\n r'(([^ ]+):(\\d+):(\\d+))|(([^ ]+):(\\d+))'\n # or mod+off, where mod (19, 31), off (21, 32)\n r'|'\n r'(\\(([^+]+)(\\+([xX0-9a-fA-F]+))?\\)))'\n r')'\n # If there is only fun info, extract\n r'|'\n r'(in\\s*(((.*)\\+([xX0-9a-fA-F]+))|(.*)))'\n # If there is only mod info, extract\n r'|'\n r'(\\((((.*)\\+([xX0-9a-fA-F]+))|(.*))\\))'\n r')')\nSAN_ADDR_REGEX = re.compile(r'.*(ERROR: [a-zA-Z]+Sanitizer)[: ]*(.*) on '\n r'(unknown address |address |)([xX0-9a-fA-F]+)')\nSAN_SEGV_REGEX = re.compile(r'.*([a-zA-Z]+Sanitizer).*(SEGV|access-violation) '\n r'on unknown address ([xX0-9a-fA-F]+)')\nSECURITY_CHECK_FAILURE_REGEX = re.compile(\n r'.*\\[[^\\]]*[:]([^\\](]*).*\\].*Security CHECK failed[:]\\s*(.*)\\.\\s*')\nSECURITY_DCHECK_FAILURE_REGEX = re.compile(\n r'.*\\[[^\\]]*[:]([^\\](]*).*\\].*Security DCHECK failed[:]\\s*(.*)\\.\\s*')\nUBSAN_DIVISION_BY_ZERO_REGEX = re.compile(r'.*division by zero.*')\nUBSAN_FLOAT_CAST_OVERFLOW_REGEX = re.compile(r'.*outside the range of '\n r'representable values.*')\nUBSAN_INCORRECT_FUNCTION_POINTER_REGEX = re.compile(\n r'.*call to function [^\\s]+ through pointer to incorrect function type.*')\nUBSAN_INDEX_OOB_REGEX = re.compile(r'.*out of bounds for type.*')\nUBSAN_UNSIGNED_INTEGER_OVERFLOW_REGEX = re.compile(\n r'.*unsigned integer overflow.*')\nUBSAN_INTEGER_OVERFLOW_REGEX = re.compile(\n r'.*(integer overflow|'\n r'(negation|division) of.*cannot be represented in type).*')\nUBSAN_INVALID_BOOL_VALUE_REGEX = re.compile(\n r'.*not a valid value for type \\'(bool|BOOL)\\'.*')\nUBSAN_INVALID_BUILTIN_REGEX = re.compile(r'.*, which is not a valid argument.*')\nUBSAN_INVALID_ENUM_VALUE_REGEX = re.compile(r'.*not a valid value for type.*')\nUBSAN_MISALIGNED_ADDRESS_REGEX = re.compile(r'.*misaligned address.*')\nUBSAN_NO_RETURN_VALUE_REGEX = re.compile(\n r'.*reached the end of a value-returning function.*')\nUBSAN_NULL_ARGUMENT_REGEX = re.compile(\n r'.*null pointer passed as .*, which is declared to never be null.*')\nUBSAN_NULL_POINTER_READ_REGEX = re.compile(r'.*load of null pointer.*')\nUBSAN_NULL_POINTER_REFERENCE_REGEX = re.compile(\n r'.*(binding to|access within|call on) null pointer.*')\nUBSAN_NULL_POINTER_WRITE_REGEX = re.compile(r'.*store to null pointer.*')\nUBSAN_OBJECT_SIZE_REGEX = re.compile(\n r'.*address .* with insufficient space for an object of type.*')\nUBSAN_POINTER_OVERFLOW_REGEX = re.compile(\n r'.*((addition|subtraction) of unsigned offset |'\n r'pointer index expression with base |'\n r'applying non-zero offset [0-9]+ to null pointer|'\n r'applying zero offset to null pointer).*')\nUBSAN_RETURNS_NONNULL_ATTRIBUTE_REGEX = re.compile(\n r'.*null pointer returned from function declared to never return null.*')\nUBSAN_RUNTIME_ERROR_REGEX = re.compile(r'(.*): runtime error: (.*)')\nUBSAN_SHIFT_ERROR_REGEX = re.compile(r'.*shift.*')\nUBSAN_UNREACHABLE_REGEX = re.compile(\n r'.*execution reached an unreachable program point.*')\nUBSAN_VLA_BOUND_REGEX = re.compile(\n r'.*variable length array bound evaluates to non-positive value.*')\nUBSAN_VPTR_REGEX = re.compile(\n r'(.*): runtime error: '\n r'(member access within|member call on|downcast of)'\n r' address ([xX0-9a-fA-F]+) .* of type (.*)')\nUBSAN_VPTR_INVALID_DOWNCAST_REGEX = re.compile(\n r'.*note: object is of type (.*)')\nUBSAN_VPTR_INVALID_OFFSET_REGEX = re.compile(\n r'.*at offset (\\d+) within object of 
type (.*)')\nUBSAN_VPTR_INVALID_VPTR_REGEX = re.compile(r'.*note: object has invalid vptr')\nV8_ABORT_FAILURE_REGEX = re.compile(r'^abort: (CSA_ASSERT failed:.*)')\nV8_ABORT_METADATA_REGEX = re.compile(r'(.*) \\[(.*):\\d+\\]$')\nV8_CORRECTNESS_FAILURE_REGEX = re.compile(r'#\\s*V8 correctness failure')\nV8_CORRECTNESS_METADATA_REGEX = re.compile(\n r'#\\s*V8 correctness ((configs|sources|suppression): .*)')\nV8_ERROR_REGEX = re.compile(r'\\s*\\[[^\\]]*\\] V8 error: (.+)\\.$')\nWINDOWS_CDB_STACK_FRAME_REGEX = re.compile(\n r'([0-9a-zA-Z`]+) ' # Child EBP or SP; remove ` if needed (1)\n r'([0-9a-zA-Z`]+) ' # RetAddr; remove ` if needed (2)\n r'([0-9a-zA-Z_]+)' # mod (3)\n r'!(.*)\\+' # fun (4)\n r'([xX0-9a-fA-F]+)') # off (5)\nWINDOWS_CDB_STACK_START_REGEX = re.compile(r'ChildEBP RetAddr')\nWINDOWS_CDB_CRASH_TYPE_ADDRESS_REGEX = re.compile(\n r'Attempt to (.*) [^ ]* address (.*)')\nWINDOWS_CDB_CRASH_TYPE_REGEX = re.compile(\n r'.*DEFAULT_BUCKET_ID[ ]*[:][ ]*([a-zA-Z_]+)')\nWINDOWS_CDB_STACK_OVERFLOW_REGEX = re.compile(\n r'.*ExceptionCode: .*\\(Stack overflow\\).*')\n\n# Golang specific regular expressions.\nGOLANG_DIVISION_BY_ZERO_REGEX = re.compile(\n r'^panic: runtime error: integer divide by zero.*')\nGOLANG_INDEX_OUT_OF_RANGE_REGEX = re.compile(\n r'^panic: runtime error: index out of range.*')\nGOLANG_INVALID_MEMORY_ADDRESS_REGEX = re.compile(\n r'^panic: runtime error: invalid memory address.*')\nGOLANG_MAKESLICE_LEN_OUT_OF_RANGE_REGEX = re.compile(\n r'^panic: runtime error: makeslice: len out of range.*')\nGOLANG_SLICE_BOUNDS_OUT_OF_RANGE_REGEX = re.compile(\n r'^panic: runtime error: slice bounds out of range.*')\nGOLANG_STACK_OVERFLOW_REGEX = re.compile(r'^fatal error: stack overflow.*')\n\nGOLANG_CRASH_TYPES_MAP = [\n (GOLANG_DIVISION_BY_ZERO_REGEX, 'Integer divide by zero'),\n (GOLANG_INDEX_OUT_OF_RANGE_REGEX, 'Index out of range'),\n (GOLANG_INVALID_MEMORY_ADDRESS_REGEX, 'Invalid memory address'),\n (GOLANG_MAKESLICE_LEN_OUT_OF_RANGE_REGEX, 'Makeslice: len out of range'),\n (GOLANG_SLICE_BOUNDS_OUT_OF_RANGE_REGEX, 'Slice bounds out of range'),\n (GOLANG_STACK_OVERFLOW_REGEX, 'Stack overflow'),\n]\n\nGOLANG_FATAL_ERROR_REGEX = re.compile(r'^fatal error: (.*)')\n\nGOLANG_STACK_FRAME_FUNCTION_REGEX = re.compile(\n r'^([0-9a-zA-Z\\.\\-\\_\\\\\\/\\(\\)\\*]+)\\([x0-9a-f\\s,\\.{}]*\\)$')\n\n# Python specific regular expressions.\nPYTHON_UNHANDLED_EXCEPTION = re.compile(\n r'^\\s*=== Uncaught Python exception: ===$')\n\nPYTHON_CRASH_TYPES_MAP = [\n (PYTHON_UNHANDLED_EXCEPTION, 'Uncaught exception'),\n]\n\nPYTHON_STACK_FRAME_FUNCTION_REGEX = re.compile(\n # File \"<embedded stdlib>/gzip.py\", line 421, in _read_gzip_header\n r'^\\s*File \"([^\"]+)\", line (\\d+), in (.+)$')\n\n# Mappings of Android kernel error status codes to strings.\nANDROID_KERNEL_STATUS_TO_STRING = {\n 0b0001: 'Alignment Fault',\n 0b0100: 'Instruction Cache Maintenance Fault',\n 0b1100: 'L1 Translation',\n 0b1110: 'L2 Translation',\n 0b0101: 'Translation Fault, Section',\n 0b0111: 'Translation Fault, Page',\n 0b0011: 'Access Flag Fault, Section',\n 0b0110: 'Access Flag Fault, Page',\n 0b1001: 'Domain Fault, Section',\n 0b1011: 'Domain Fault, Page',\n 0b1101: 'Permission Fault, Section',\n 0b1111: 'Permissions Fault, Page',\n}\n\n# Ignore lists.\nSTACK_FRAME_IGNORE_REGEXES = [\n # Function names (exact match).\n r'^abort$',\n r'^exit$',\n r'^pthread_create$',\n r'^pthread_kill$',\n r'^raise$',\n r'^tgkill$',\n r'^__chk_fail$',\n r'^__fortify_fail$',\n\n # Function names (startswith).\n r'^(|__)aeabi_',\n 
r'^(|__)memcmp',\n r'^(|__)memcpy',\n r'^(|__)memmove',\n r'^(|__)memset',\n r'^(|__)strcmp',\n r'^(|__)strcpy',\n r'^(|__)strdup',\n r'^(|__)strlen',\n r'^(|__)strncpy',\n r'^<null>',\n r'^Abort\\(',\n r'^CFCrash',\n r'^ExitCallback',\n r'^IsSandboxedProcess',\n r'^LLVMFuzzerTestOneInput',\n r'^MSanAtExitWrapper',\n r'^New',\n r'^RaiseException',\n r'^SbSystemBreakIntoDebugger',\n r'^SignalAction',\n r'^SignalHandler',\n r'^TestOneProtoInput',\n r'^WTF::',\n r'^WTFCrash',\n r'^X11Error',\n r'^_L_unlock_',\n r'^_\\$LT\\$',\n r'^__GI_',\n r'^__asan::',\n r'^__asan_',\n r'^__assert_',\n r'^__cxa_atexit',\n r'^__cxa_rethrow',\n r'^__cxa_throw',\n r'^__dump_stack',\n r'^__hwasan::',\n r'^__hwasan_',\n r'^__interceptor_',\n r'^__kasan_',\n r'^__libc_',\n r'^__lsan::',\n r'^__lsan_',\n r'^__msan::',\n r'^__msan_',\n r'^__pthread_kill',\n r'^__run_exit_handlers',\n r'^__rust_try',\n r'^__sanitizer::',\n r'^__sanitizer_',\n r'^__tsan::',\n r'^__tsan_',\n r'^__ubsan::',\n r'^__ubsan_',\n r'^_asan_',\n r'^_hwasan_',\n r'^_lsan_',\n r'^_msan_',\n r'^_objc_terminate',\n r'^_sanitizer_',\n r'^_start',\n r'^_tsan_',\n r'^_ubsan_',\n r'^abort',\n r'^alloc::',\n r'^android\\.app\\.ActivityManagerProxy\\.',\n r'^android\\.os\\.Parcel\\.',\n r'^art::Thread::CreateNativeThread',\n r'^asan_',\n r'^asan\\.module_ctor',\n r'^asan\\.module_dtor',\n r'^calloc',\n r'^check_memory_region',\n r'^common_exit',\n r'^core::fmt::write',\n r'^delete',\n r'^demangling_terminate_handler',\n r'^dump_backtrace',\n r'^dump_stack',\n r'^exit_or_terminate_process',\n r'^fpehandler\\(',\n r'^free',\n r'^fuzzer::',\n r'^g_log',\n r'^generic_cpp_',\n r'^gsignal',\n r'^kasan_',\n r'^libfuzzer_sys::initialize',\n r'^main',\n r'^malloc',\n r'^mozalloc_',\n r'^new',\n r'^object_err',\n r'^operator',\n r'^panic_abort::',\n r'^print_trailer',\n r'^realloc',\n r'^rust_begin_unwind',\n r'^rust_fuzzer_test_input',\n r'^rust_oom',\n r'^rust_panic',\n r'^scanf',\n r'^show_stack',\n r'^std::__terminate',\n r'^std::io::Write::write_fmt',\n r'^std::panic',\n r'^std::process::abort',\n r'^std::sys::unix::abort',\n r'^std::sys_common::backtrace',\n r'^__rust_start_panic',\n r'^__scrt_common_main_seh',\n\n # Functions names (contains).\n r'.*ASAN_OnSIGSEGV',\n r'.*BaseThreadInitThunk',\n r'.*DebugBreak',\n r'.*DefaultDcheckHandler',\n r'.*ForceCrashOnSigAbort',\n r'.*MemoryProtection::CMemoryProtector',\n r'.*PartitionAlloc',\n r'.*RtlFreeHeap',\n r'.*RtlInitializeExceptionChain',\n r'.*RtlReportCriticalFailure',\n r'.*RtlUserThreadStart',\n r'.*RtlpHeapHandleError',\n r'.*RtlpLogHeapFailure',\n r'.*SkDebugf',\n r'.*StackDumpSignalHandler',\n r'.*__android_log_assert',\n r'.*__tmainCRTStartup',\n r'.*_asan_rtl_',\n r'.*agent::asan::',\n r'.*allocator_shim',\n r'.*asan_Heap',\n r'.*asan_check_access',\n r'.*asan_osx_dynamic\\.dylib',\n r'.*assert',\n r'.*base::FuzzedDataProvider',\n r'.*base::allocator',\n r'.*base::android::CheckException',\n r'.*base::debug::BreakDebugger',\n r'.*base::debug::CollectStackTrace',\n r'.*base::debug::StackTrace::StackTrace',\n r'.*ieee754\\-',\n r'.*libpthread',\n r'.*logger',\n r'.*logging::CheckError',\n r'.*logging::ErrnoLogMessage',\n r'.*logging::LogMessage',\n r'.*stdext::exception::what',\n r'.*v8::base::OS::Abort',\n\n # File paths.\n r'.* base/callback',\n r'.* /rust(|c)/',\n r'.*/AOSP\\-toolchain/',\n r'.*/bindings/ToV8\\.h',\n r'.*/crosstool/',\n r'.*/gcc/',\n r'.*/glibc\\-',\n r'.*/jemalloc/',\n r'.*/libc\\+\\+',\n r'.*/libc/',\n r'.*/llvm\\-build/',\n r'.*/minkernel/crts/',\n r'.*/sanitizer_common/',\n 
r'.*/tcmalloc/',\n r'.*/vc/include/',\n r'.*/vctools/crt/',\n r'.*/win_toolchain/',\n r'.*libc\\+\\+/',\n\n # Wrappers from honggfuzz/libhfuzz/memorycmp.c.\n r'.*/memorycmp\\.c',\n\n # Others (uncategorized).\n r'.*\\+Unknown',\n r'.*<unknown module>',\n r'.*Inline Function @',\n r'^<unknown>$',\n r'^\\[vdso\\]$',\n r'^linux-gate.so.*$',\n\n # Golang specific frames to ignore.\n r'^panic$',\n r'^runtime\\.',\n\n # Fuchsia specific.\n r'^CrashTrampolineAsm',\n r'^libc_io_functions_not_implemented_use_fdio_instead',\n r'^<libclang_rt.asan.so>',\n r'^__zx_panic',\n r'^syslog::LogMessage',\n\n # Android kernel stack frame ignores.\n r'^print_address_description',\n r'^_etext',\n\n # Swift specific.\n r'^_swift_stdlib_',\n\n # googlefuzztest specific.\n r'.*fuzztest::internal::',\n\n # V8 specific.\n r'^V8_Fatal',\n # Ignore error-throwing frames, the bug is in the caller.\n r'^blink::ReportV8FatalError',\n r'^v8::api_internal::ToLocalEmpty',\n]\n\nSTACK_FRAME_IGNORE_REGEXES_IF_SYMBOLIZED = [\n r'.*libc\\.so',\n r'.*libc\\+\\+\\.so',\n r'.*libc\\+\\+_shared\\.so',\n r'.*libstdc\\+\\+\\.so',\n r'.*libc-.*\\.so',\n]\n\nIGNORE_CRASH_TYPES_FOR_ABRT_BREAKPOINT_AND_ILLS = [\n 'Arbitrary file open',\n 'ASSERT',\n 'CHECK failure',\n 'Command injection',\n 'DCHECK failure',\n 'Fatal error',\n 'Security CHECK failure',\n 'Security DCHECK failure',\n 'V8 API error',\n]\n\nSTATE_STOP_MARKERS = [\n 'Direct leak of',\n 'Uninitialized value was stored to memory at',\n 'allocated by thread',\n 'created by main thread at',\n 'located in stack of thread',\n 'previously allocated by',\n]\n\nUBSAN_CRASH_TYPES_MAP = [\n (UBSAN_DIVISION_BY_ZERO_REGEX, 'Divide-by-zero'),\n (UBSAN_FLOAT_CAST_OVERFLOW_REGEX, 'Float-cast-overflow'),\n (UBSAN_INCORRECT_FUNCTION_POINTER_REGEX, 'Incorrect-function-pointer-type'),\n (UBSAN_INDEX_OOB_REGEX, 'Index-out-of-bounds'),\n (UBSAN_INVALID_BOOL_VALUE_REGEX, 'Invalid-bool-value'),\n (UBSAN_INVALID_BUILTIN_REGEX, 'Invalid-builtin-use'),\n (UBSAN_MISALIGNED_ADDRESS_REGEX, 'Misaligned-address'),\n (UBSAN_NO_RETURN_VALUE_REGEX, 'No-return-value'),\n (UBSAN_NULL_ARGUMENT_REGEX, 'Invalid-null-argument'),\n (UBSAN_NULL_POINTER_READ_REGEX, 'Null-dereference READ'),\n (UBSAN_NULL_POINTER_REFERENCE_REGEX, 'Null-dereference'),\n (UBSAN_NULL_POINTER_WRITE_REGEX, 'Null-dereference WRITE'),\n (UBSAN_OBJECT_SIZE_REGEX, 'Object-size'),\n (UBSAN_POINTER_OVERFLOW_REGEX, 'Pointer-overflow'),\n (UBSAN_RETURNS_NONNULL_ATTRIBUTE_REGEX, 'Invalid-null-return'),\n (UBSAN_SHIFT_ERROR_REGEX, 'Undefined-shift'),\n (UBSAN_UNREACHABLE_REGEX, 'Unreachable code'),\n (UBSAN_UNSIGNED_INTEGER_OVERFLOW_REGEX, 'Unsigned-integer-overflow'),\n (UBSAN_VLA_BOUND_REGEX, 'Non-positive-vla-bound-value'),\n\n # The following types are supersets of other types, and should be placed\n # at the end to avoid subsuming crashes from the more specialized types.\n (UBSAN_INVALID_ENUM_VALUE_REGEX, 'Invalid-enum-value'),\n (UBSAN_INTEGER_OVERFLOW_REGEX, 'Integer-overflow'),\n]\n\n# Additional regexes for cleaning up format.\nSTRIP_STRUCTURE_REGEXES = [\n re.compile(r'^in (.*)'), # sanitizers have prefix for function if present\n re.compile(r'^\\((.*)\\)$'), # sanitizers wrap module if no function\n]\n\n# Other constants.\nLINE_LENGTH_CAP = 80\nMAX_CRASH_STATE_FRAMES = 3\nMAX_CYCLE_LENGTH = 10\nREPEATED_CYCLE_COUNT = 3\n\n# Stackframe format specifications.\nCHROME_STACK_FRAME_SPEC = stack_parser.StackFrameSpec(\n address=3, function_name=4)\nCHROME_WIN_STACK_FRAME_SPEC = stack_parser.StackFrameSpec(\n function_name=1,\n function_base=2,\n 
function_offset=3,\n filename=5,\n fileline=6,\n base=10)\nCHROME_MAC_STACK_FRAME_SPEC = stack_parser.StackFrameSpec(\n address=5, function_name=6, function_offset=7, module_name=2, base=10)\nSAN_STACK_FRAME_SPEC = stack_parser.StackFrameSpec(\n address=2,\n function_name=[7, 5, 23],\n function_offset=8,\n filename=[12, 16],\n fileline=[13, 17],\n module_name=[19, 31],\n module_offset=[21, 32])\nWINDOWS_CDB_STACK_FRAME_SPEC = stack_parser.StackFrameSpec(\n address=1, function_name=4, function_offset=5, module_name=3)\n",
"path": "src/clusterfuzz/stacktraces/constants.py"
}
] | [
{
"content": "# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Stack parsing constants.\"\"\"\n\nimport re\n\nfrom clusterfuzz._internal.crash_analysis.stack_parsing import stack_parser\n\nC_CPP_EXTENSIONS = ['c', 'cc', 'cpp', 'cxx', 'h', 'hh', 'hpp', 'hxx']\n\n# Patterns which cannot be compiled directly, or which are used for direct\n# comparison.\nCHECK_FAILURE_PATTERN = r'Check failed: '\nJNI_ERROR_STRING = r'JNI DETECTED ERROR IN APPLICATION:'\n\n# Common log prefix format for Google fatal logs.\nGOOGLE_LOG_FATAL_PREFIX = r'^F\\d{4}\\s+\\d{2}:\\d{2}:\\d{2}\\.\\d+\\s+\\d+\\s+(.*):\\d+\\]'\n\n# Compiled regular expressions.\nANDROID_ABORT_REGEX = re.compile(r'^Abort message: (.*)')\nANDROID_FATAL_EXCEPTION_REGEX = re.compile(r'.*FATAL EXCEPTION.*:')\nANDROID_KERNEL_ERROR_REGEX = re.compile(\n r'.*Internal error: (Oops)?( -|:) (BUG|[0-9a-fA-F]+)')\nANDROID_KERNEL_STACK_FRAME_REGEX = re.compile(\n # e.g. \"[ 1998.156940] [<c0667574>] \"\n r'[^(]*\\[\\<([x0-9a-fA-F]+)\\>\\]\\s+'\n # e.g. \"(msm_vidc_prepare_buf+0xa0/0x124)\"; function (3), offset (4)\n r'\\(?(([\\w]+)\\+([\\w]+)/[\\w]+)\\)?')\nANDROID_KERNEL_STACK_FRAME_NO_ADDRESS_REGEX = re.compile(\n # e.g. \"(msm_vidc_prepare_buf+0xa0/0x124)\"; function (2), offset (3)\n r'\\(?(([\\w]+)\\+([\\w]+)/[\\w]+)\\)?')\nANDROID_KERNEL_TIME_REGEX = re.compile(r'^\\[\\s*\\d+\\.\\d+\\]\\s')\n# Parentheses are optional.\nANDROID_PROCESS_NAME_REGEX = re.compile(r'.*[(](.*)[)]$')\nANDROID_SEGV_REGEX = re.compile(r'.*signal.*\\(SIG.*fault addr ([^ ]*)(.*)')\nASAN_INVALID_FREE_REGEX = re.compile(\n r'.*AddressSanitizer: '\n r'attempting free on address which was not malloc\\(\\)-ed: '\n r'([xX0-9a-fA-F]+)')\nASAN_DOUBLE_FREE_REGEX = re.compile(\n r'.*(AddressSanitizer).*double-free'\n r' on (unknown address |address |)([xX0-9a-fA-F]+)')\nASAN_MEMCPY_OVERLAP_REGEX = re.compile(\n r'.*(AddressSanitizer).*memcpy-param-overlap'\n r'[^\\[]*([\\[].*[)])')\nASAN_REGEX = re.compile(\n r'.*ERROR: (HWAddressSanitizer|AddressSanitizer)[: ]*[ ]*([^(:;]+)')\nASSERT_REGEX = re.compile(\n r'(?:\\[.*?\\]|.*\\.(?:%s):.*)?' % ('|'.join(C_CPP_EXTENSIONS)) +\n r'\\s*(?:ASSERT(?:ION)? FAIL(?:URE|ED)|panic): (.*)', re.IGNORECASE)\nASSERT_REGEX_GOOGLE = re.compile(GOOGLE_LOG_FATAL_PREFIX +\n r'.*assertion failed at\\s.*\\sin\\s*.*: (.*)')\nASSERT_REGEX_GLIBC = re.compile(\n r'.*:\\s*assertion [`\\'\"]?(.*?)[`\\'\"]? 
failed\\.?$', re.IGNORECASE)\nASSERT_NOT_REACHED_REGEX = re.compile(r'^\\s*SHOULD NEVER BE REACHED\\s*$')\nCENTIPEDE_TIMEOUT_REGEX = re.compile(\n r'^========= Timeout of \\d+ seconds exceeded; exiting')\nCFI_ERROR_REGEX = re.compile(\n r'(.*): runtime error: control flow integrity check for type (.*) '\n r'failed during (.*vtable address ([xX0-9a-fA-F]+)|.*)')\nCFI_INVALID_DOWNCAST_REGEX = re.compile(r'.*note: vtable is of type (.*)')\nCFI_INVALID_VPTR_REGEX = re.compile(r'.*note: invalid vtable$')\nCFI_FUNC_DEFINED_HERE_REGEX = re.compile(r'.*note: .* defined here$')\nCFI_NODEBUG_ERROR_MARKER_REGEX = re.compile(\n r'CFI: Most likely a control flow integrity violation;.*')\nCHROME_CHECK_FAILURE_REGEX = re.compile(\n r'\\s*\\[[^\\]]*[:]([^\\](]*).*\\].*Check failed[:]\\s*(.*)')\nCHROME_STACK_FRAME_REGEX = re.compile(\n r'[ ]*(#(?P<frame_id>[0-9]+)[ ]' # frame id (2)\n r'([xX0-9a-fA-F]+)[ ])' # addr (3)\n r'([^/\\\\]+)$') # rest, usually fun (4); may have off\nCHROME_WIN_STACK_FRAME_REGEX = re.compile(\n r'[ ]*([^/\\\\]+) ' # fun (1)\n r'\\[([xX0-9a-fA-F]+)\\+' # fun_base (2)\n r'(\\d+)\\]' # off[dec] (3)\n r'( \\((.*):(\\d+)\\))?') # if available, file (5) and line (6)\nCHROME_MAC_STACK_FRAME_REGEX = re.compile(\n r'(?P<frame_id>\\d+)\\s+' # frame id (1)\n r'(([\\w ]+)|(\\?\\?\\?))\\s+' # image (2)\n r'([xX0-9a-fA-F]+)\\s+' # addr[hex] (5)\n r'([^/\\\\]+)\\s*\\+\\s*' # fun (6)\n r'(\\d+)') # off[dec] (7)\nMSAN_TSAN_REGEX = re.compile(\n r'.*(ThreadSanitizer|MemorySanitizer):\\s+(?!ABRT)(?!ILL)([^(:]+)')\nEXTRA_SANITIZERS_COMMAND_INJECTION_REGEX = re.compile(\n r'===BUG DETECTED: Shell (corruption|injection)===')\nEXTRA_SANITIZERS_ARBITRARY_FILE_OPEN_REGEX = re.compile(\n r'===BUG DETECTED: Arbitrary file open===')\nFATAL_ERROR_GENERIC_FAILURE = re.compile(r'#\\s+()(.*)')\nFATAL_ERROR_CHECK_FAILURE = re.compile(\n r'#\\s+(Check failed: |RepresentationChangerError: node #\\d+:)(.*)')\nFATAL_ERROR_DCHECK_FAILURE = re.compile(r'#\\s+(Debug check failed: )(.*)')\nFATAL_ERROR_REGEX = re.compile(r'#\\s*Fatal error in (.*)')\nFATAL_ERROR_LINE_REGEX = re.compile(r'#\\s*Fatal error in (.*), line [0-9]+')\nFATAL_ERROR_UNREACHABLE = re.compile(r'# un(reachable|implemented) code')\nGENERIC_SEGV_HANDLER_REGEX = re.compile(\n 'Received signal 11 SEGV_[A-Z]+ ([0-9a-f]*)')\nGOOGLE_CHECK_FAILURE_REGEX = re.compile(GOOGLE_LOG_FATAL_PREFIX +\n r'\\s*Check failed[:]\\s*(.*)')\nGOOGLE_LOG_FATAL_REGEX = re.compile(GOOGLE_LOG_FATAL_PREFIX + r'\\s*(.*)')\nGPU_PROCESS_FAILURE = re.compile(r'.*GPU process exited unexpectedly.*')\nHWASAN_ALLOCATION_TAIL_OVERWRITTEN_ADDRESS_REGEX = re.compile(\n r'.*ERROR: HWAddressSanitizer: allocation-tail-overwritten; '\n r'heap object \\[([xX0-9a-fA-F]+),.*of size')\nJAZZER_JAVA_SECURITY_EXCEPTION_REGEX = re.compile(\n '== Java Exception: .*FuzzerSecurityIssue')\nJAZZER_JAVA_EXCEPTION_REGEX = re.compile('== Java Exception: .*')\nJAVA_EXCEPTION_CRASH_STATE_REGEX = re.compile(r'\\s*at (.*)\\(.*\\)')\nKERNEL_BUG = re.compile(r'kernel BUG at (.*)')\nKASAN_ACCESS_TYPE_REGEX = re.compile(r'(Read|Write) of size ([0-9]+)')\nKASAN_ACCESS_TYPE_ADDRESS_REGEX = re.compile(\n r'(Read|Write) of size ([0-9]+) at (addr|address) ([a-f0-9]+)')\nKASAN_CRASH_TYPE_ADDRESS_REGEX = re.compile(\n r'BUG: KASAN: (.*) (in|on).*(addr|address) ([a-f0-9]+)')\nKASAN_CRASH_TYPE_ADDRESS_RANGE_REGEX = re.compile(\n r'KASAN: (.*?) 
(in|on) range \\[([a-z0-9]+)-([a-z0-9]+)\\]')\nKASAN_CRASH_TYPE_FUNCTION_REGEX = re.compile(\n r'BUG: KASAN: (.*) (in|on).* ([\\w]+)\\+([\\w]+)\\/([\\w]+)')\nKASAN_GPF_REGEX = re.compile(r'general protection fault:.*KASAN')\nKERNEL_PANIC = re.compile(r'Kernel panic - not syncing: (.*)')\nLIBFUZZER_DEADLY_SIGNAL_REGEX = re.compile(\n r'.*ERROR:\\s*libFuzzer:\\s*deadly signal')\nLIBFUZZER_FUZZ_TARGET_EXITED_REGEX = re.compile(\n r'.*ERROR:\\s*libFuzzer:\\s*fuzz target exited')\nLIBFUZZER_OVERWRITES_CONST_INPUT_REGEX = re.compile(\n r'.*ERROR:\\s*libFuzzer:\\s*fuzz target overwrites its const input')\nLIBFUZZER_TIMEOUT_REGEX = re.compile(r'.*ERROR:\\s*libFuzzer:\\s*timeout')\nLIBRARY_NOT_FOUND_ANDROID_REGEX = re.compile(\n r'.*: library ([`\\'\"])(.*)\\1 not found')\nLIBRARY_NOT_FOUND_LINUX_REGEX = re.compile(\n r'.*error while loading shared libraries: ([^:]*): '\n r'cannot open shared object file')\nLINUX_GDB_CRASH_TYPE_REGEX = re.compile(r'Program received signal ([a-zA-Z]+),')\nLINUX_GDB_CRASH_ADDRESS_REGEX = re.compile(r'rip[ ]+([xX0-9a-fA-F]+)')\nLINUX_GDB_CRASH_ADDRESS_NO_REGISTERS_REGEX = re.compile(\n r'^(0[xX][0-9a-fA-F]+)\\s+in\\s+')\nLSAN_DIRECT_LEAK_REGEX = re.compile(r'Direct leak of ')\nLSAN_INDIRECT_LEAK_REGEX = re.compile(r'Indirect leak of ')\nMAC_GDB_CRASH_ADDRESS_REGEX = re.compile(\n r'Reason:.*at address[^0-9]*([0-9a-zA-Z]+)')\nOUT_OF_MEMORY_REGEX = re.compile(r'.*(?:%s).*' % '|'.join([\n r'# Allocation failed.*out of memory',\n r'::OnNoMemory',\n r'ERROR.*Sanitizer failed to allocate',\n r'FatalProcessOutOfMemory', # V8\n r'Fatal (?:process|JavaScript) out of memory:', # V8\n r'Fatal JavaScript invalid size error', # V8\n r'FX_OutOfMemoryTerminate',\n r'Out of memory\\. Dying.',\n r'Out of memory\\. size=',\n r'Sanitizer: allocation-size-too-big',\n r'Sanitizer: calloc-overflow',\n r'Sanitizer: calloc parameters overflow',\n r'Sanitizer: requested allocation size.*exceeds maximum supported size',\n r'Sanitizer: out of memory',\n r'TerminateBecauseOutOfMemory',\n r'allocator is out of memory trying to allocate',\n r'blinkGCOutOfMemory',\n r'couldnt allocate.*Out of memory',\n r'libFuzzer: out-of-memory \\(',\n r'rss limit exhausted',\n r'in rust_oom',\n r'Failure description: out-of-memory', # Centipede.\n]))\nRUNTIME_ERROR_REGEX = re.compile(r'#\\s*Runtime error in (.*)')\nRUNTIME_ERROR_LINE_REGEX = re.compile(r'#\\s*Runtime error in (.*), line [0-9]+')\nRUST_ASSERT_REGEX = re.compile(r'thread\\s.*\\spanicked at \\'([^\\']*)',\n re.IGNORECASE)\nSAN_ABRT_REGEX = re.compile(r'.*[a-zA-Z]+Sanitizer: ABRT ')\nSAN_BREAKPOINT_REGEX = re.compile(r'.*[a-zA-Z]+Sanitizer: breakpoint ')\nSAN_CHECK_FAILURE_REGEX = re.compile(\n r'.*Sanitizer CHECK failed[:]\\s*[^ ]*\\s*(.*)')\nSAN_CRASH_TYPE_ADDRESS_REGEX = re.compile(\n r'[ ]*([^ ]*|Atomic [^ ]*) of size ([^ ]*) at ([^ ]*)')\nSAN_DEADLYSIGNAL_REGEX = re.compile(r'.*:DEADLYSIGNAL')\nSAN_FPE_REGEX = re.compile(r'.*[a-zA-Z]+Sanitizer: FPE ')\nSAN_ILL_REGEX = re.compile(r'.*[a-zA-Z]+Sanitizer: ILL ')\nSAN_TRAP_REGEX = re.compile(r'.*[a-zA-Z]+Sanitizer: TRAP ')\nSAN_SEGV_CRASH_TYPE_REGEX = re.compile(\n r'.*The signal is caused by a ([A-Z]+) memory access.')\n# FIXME: Replace when better ways to check signal crashes are available.\nSAN_SIGNAL_REGEX = re.compile(r'.*SCARINESS: (\\d+) \\(signal\\)', re.DOTALL)\nSAN_STACK_FRAME_REGEX = re.compile(\n # frame id (1)\n r'\\s*#(?P<frame_id>\\d+)\\s+'\n # addr (2)\n r'([xX0-9a-fA-F]+)\\s+'\n # Format is [in {fun}[+{off}]] [{file}[:{line}[:{char}]]] [({mod}[+{off}])]\n # If there is fun and 
mod/file info, extract\n # fun+off, where fun (7, 5, 23), off (8)\n r'((in\\s*(((.*)\\+([xX0-9a-fA-F]+))|(.*)) '\n r'('\n # file:line:char, where file (12, 16), line (13, 17), char (14)\n r'(([^ ]+):(\\d+):(\\d+))|(([^ ]+):(\\d+))'\n # or mod+off, where mod (19, 31), off (21, 32)\n r'|'\n r'(\\(([^+]+)(\\+([xX0-9a-fA-F]+))?\\)))'\n r')'\n # If there is only fun info, extract\n r'|'\n r'(in\\s*(((.*)\\+([xX0-9a-fA-F]+))|(.*)))'\n # If there is only mod info, extract\n r'|'\n r'(\\((((.*)\\+([xX0-9a-fA-F]+))|(.*))\\))'\n r')')\nSAN_ADDR_REGEX = re.compile(r'.*(ERROR: [a-zA-Z]+Sanitizer)[: ]*(.*) on '\n r'(unknown address |address |)([xX0-9a-fA-F]+)')\nSAN_SEGV_REGEX = re.compile(r'.*([a-zA-Z]+Sanitizer).*(SEGV|access-violation) '\n r'on unknown address ([xX0-9a-fA-F]+)')\nSECURITY_CHECK_FAILURE_REGEX = re.compile(\n r'.*\\[[^\\]]*[:]([^\\](]*).*\\].*Security CHECK failed[:]\\s*(.*)\\.\\s*')\nSECURITY_DCHECK_FAILURE_REGEX = re.compile(\n r'.*\\[[^\\]]*[:]([^\\](]*).*\\].*Security DCHECK failed[:]\\s*(.*)\\.\\s*')\nUBSAN_DIVISION_BY_ZERO_REGEX = re.compile(r'.*division by zero.*')\nUBSAN_FLOAT_CAST_OVERFLOW_REGEX = re.compile(r'.*outside the range of '\n r'representable values.*')\nUBSAN_INCORRECT_FUNCTION_POINTER_REGEX = re.compile(\n r'.*call to function [^\\s]+ through pointer to incorrect function type.*')\nUBSAN_INDEX_OOB_REGEX = re.compile(r'.*out of bounds for type.*')\nUBSAN_UNSIGNED_INTEGER_OVERFLOW_REGEX = re.compile(\n r'.*unsigned integer overflow.*')\nUBSAN_INTEGER_OVERFLOW_REGEX = re.compile(\n r'.*(integer overflow|'\n r'(negation|division) of.*cannot be represented in type).*')\nUBSAN_INVALID_BOOL_VALUE_REGEX = re.compile(\n r'.*not a valid value for type \\'(bool|BOOL)\\'.*')\nUBSAN_INVALID_BUILTIN_REGEX = re.compile(r'.*, which is not a valid argument.*')\nUBSAN_INVALID_ENUM_VALUE_REGEX = re.compile(r'.*not a valid value for type.*')\nUBSAN_MISALIGNED_ADDRESS_REGEX = re.compile(r'.*misaligned address.*')\nUBSAN_NO_RETURN_VALUE_REGEX = re.compile(\n r'.*reached the end of a value-returning function.*')\nUBSAN_NULL_ARGUMENT_REGEX = re.compile(\n r'.*null pointer passed as .*, which is declared to never be null.*')\nUBSAN_NULL_POINTER_READ_REGEX = re.compile(r'.*load of null pointer.*')\nUBSAN_NULL_POINTER_REFERENCE_REGEX = re.compile(\n r'.*(binding to|access within|call on) null pointer.*')\nUBSAN_NULL_POINTER_WRITE_REGEX = re.compile(r'.*store to null pointer.*')\nUBSAN_OBJECT_SIZE_REGEX = re.compile(\n r'.*address .* with insufficient space for an object of type.*')\nUBSAN_POINTER_OVERFLOW_REGEX = re.compile(\n r'.*((addition|subtraction) of unsigned offset |'\n r'pointer index expression with base |'\n r'applying non-zero offset [0-9]+ to null pointer|'\n r'applying zero offset to null pointer).*')\nUBSAN_RETURNS_NONNULL_ATTRIBUTE_REGEX = re.compile(\n r'.*null pointer returned from function declared to never return null.*')\nUBSAN_RUNTIME_ERROR_REGEX = re.compile(r'(.*): runtime error: (.*)')\nUBSAN_SHIFT_ERROR_REGEX = re.compile(r'.*shift.*')\nUBSAN_UNREACHABLE_REGEX = re.compile(\n r'.*execution reached an unreachable program point.*')\nUBSAN_VLA_BOUND_REGEX = re.compile(\n r'.*variable length array bound evaluates to non-positive value.*')\nUBSAN_VPTR_REGEX = re.compile(\n r'(.*): runtime error: '\n r'(member access within|member call on|downcast of)'\n r' address ([xX0-9a-fA-F]+) .* of type (.*)')\nUBSAN_VPTR_INVALID_DOWNCAST_REGEX = re.compile(\n r'.*note: object is of type (.*)')\nUBSAN_VPTR_INVALID_OFFSET_REGEX = re.compile(\n r'.*at offset (\\d+) within object of 
type (.*)')\nUBSAN_VPTR_INVALID_VPTR_REGEX = re.compile(r'.*note: object has invalid vptr')\nV8_ABORT_FAILURE_REGEX = re.compile(r'^abort: (CSA_ASSERT failed:.*)')\nV8_ABORT_METADATA_REGEX = re.compile(r'(.*) \\[(.*):\\d+\\]$')\nV8_CORRECTNESS_FAILURE_REGEX = re.compile(r'#\\s*V8 correctness failure')\nV8_CORRECTNESS_METADATA_REGEX = re.compile(\n r'#\\s*V8 correctness ((configs|sources|suppression): .*)')\nV8_ERROR_REGEX = re.compile(r'\\s*\\[[^\\]]*\\] V8 error: (.+)\\.$')\nWINDOWS_CDB_STACK_FRAME_REGEX = re.compile(\n r'([0-9a-zA-Z`]+) ' # Child EBP or SP; remove ` if needed (1)\n r'([0-9a-zA-Z`]+) ' # RetAddr; remove ` if needed (2)\n r'([0-9a-zA-Z_]+)' # mod (3)\n r'!(.*)\\+' # fun (4)\n r'([xX0-9a-fA-F]+)') # off (5)\nWINDOWS_CDB_STACK_START_REGEX = re.compile(r'ChildEBP RetAddr')\nWINDOWS_CDB_CRASH_TYPE_ADDRESS_REGEX = re.compile(\n r'Attempt to (.*) [^ ]* address (.*)')\nWINDOWS_CDB_CRASH_TYPE_REGEX = re.compile(\n r'.*DEFAULT_BUCKET_ID[ ]*[:][ ]*([a-zA-Z_]+)')\nWINDOWS_CDB_STACK_OVERFLOW_REGEX = re.compile(\n r'.*ExceptionCode: .*\\(Stack overflow\\).*')\n\n# Golang specific regular expressions.\nGOLANG_DIVISION_BY_ZERO_REGEX = re.compile(\n r'^panic: runtime error: integer divide by zero.*')\nGOLANG_INDEX_OUT_OF_RANGE_REGEX = re.compile(\n r'^panic: runtime error: index out of range.*')\nGOLANG_INVALID_MEMORY_ADDRESS_REGEX = re.compile(\n r'^panic: runtime error: invalid memory address.*')\nGOLANG_MAKESLICE_LEN_OUT_OF_RANGE_REGEX = re.compile(\n r'^panic: runtime error: makeslice: len out of range.*')\nGOLANG_SLICE_BOUNDS_OUT_OF_RANGE_REGEX = re.compile(\n r'^panic: runtime error: slice bounds out of range.*')\nGOLANG_STACK_OVERFLOW_REGEX = re.compile(r'^fatal error: stack overflow.*')\n\nGOLANG_CRASH_TYPES_MAP = [\n (GOLANG_DIVISION_BY_ZERO_REGEX, 'Integer divide by zero'),\n (GOLANG_INDEX_OUT_OF_RANGE_REGEX, 'Index out of range'),\n (GOLANG_INVALID_MEMORY_ADDRESS_REGEX, 'Invalid memory address'),\n (GOLANG_MAKESLICE_LEN_OUT_OF_RANGE_REGEX, 'Makeslice: len out of range'),\n (GOLANG_SLICE_BOUNDS_OUT_OF_RANGE_REGEX, 'Slice bounds out of range'),\n (GOLANG_STACK_OVERFLOW_REGEX, 'Stack overflow'),\n]\n\nGOLANG_FATAL_ERROR_REGEX = re.compile(r'^fatal error: (.*)')\n\nGOLANG_STACK_FRAME_FUNCTION_REGEX = re.compile(\n r'^([0-9a-zA-Z\\.\\-\\_\\\\\\/\\(\\)\\*]+)\\([x0-9a-f\\s,\\.{}]*\\)$')\n\n# Python specific regular expressions.\nPYTHON_UNHANDLED_EXCEPTION = re.compile(\n r'^\\s*=== Uncaught Python exception: ===$')\n\nPYTHON_CRASH_TYPES_MAP = [\n (PYTHON_UNHANDLED_EXCEPTION, 'Uncaught exception'),\n]\n\nPYTHON_STACK_FRAME_FUNCTION_REGEX = re.compile(\n # File \"<embedded stdlib>/gzip.py\", line 421, in _read_gzip_header\n r'^\\s*File \"([^\"]+)\", line (\\d+), in (.+)$')\n\n# Mappings of Android kernel error status codes to strings.\nANDROID_KERNEL_STATUS_TO_STRING = {\n 0b0001: 'Alignment Fault',\n 0b0100: 'Instruction Cache Maintenance Fault',\n 0b1100: 'L1 Translation',\n 0b1110: 'L2 Translation',\n 0b0101: 'Translation Fault, Section',\n 0b0111: 'Translation Fault, Page',\n 0b0011: 'Access Flag Fault, Section',\n 0b0110: 'Access Flag Fault, Page',\n 0b1001: 'Domain Fault, Section',\n 0b1011: 'Domain Fault, Page',\n 0b1101: 'Permission Fault, Section',\n 0b1111: 'Permissions Fault, Page',\n}\n\n# Ignore lists.\nSTACK_FRAME_IGNORE_REGEXES = [\n # Function names (exact match).\n r'^abort$',\n r'^exit$',\n r'^pthread_create$',\n r'^pthread_kill$',\n r'^raise$',\n r'^tgkill$',\n r'^__chk_fail$',\n r'^__fortify_fail$',\n\n # Function names (startswith).\n r'^(|__)aeabi_',\n 
r'^(|__)memcmp',\n r'^(|__)memcpy',\n r'^(|__)memmove',\n r'^(|__)memset',\n r'^(|__)strcmp',\n r'^(|__)strcpy',\n r'^(|__)strdup',\n r'^(|__)strlen',\n r'^(|__)strncpy',\n r'^<null>',\n r'^Abort\\(',\n r'^CFCrash',\n r'^ExitCallback',\n r'^IsSandboxedProcess',\n r'^LLVMFuzzerTestOneInput',\n r'^MSanAtExitWrapper',\n r'^New',\n r'^RaiseException',\n r'^SbSystemBreakIntoDebugger',\n r'^SignalAction',\n r'^SignalHandler',\n r'^TestOneProtoInput',\n r'^WTF::',\n r'^WTFCrash',\n r'^X11Error',\n r'^_L_unlock_',\n r'^_\\$LT\\$',\n r'^__GI_',\n r'^__asan::',\n r'^__asan_',\n r'^__assert_',\n r'^__cxa_atexit',\n r'^__cxa_rethrow',\n r'^__cxa_throw',\n r'^__dump_stack',\n r'^__hwasan::',\n r'^__hwasan_',\n r'^__interceptor_',\n r'^__kasan_',\n r'^__libc_',\n r'^__lsan::',\n r'^__lsan_',\n r'^__msan::',\n r'^__msan_',\n r'^__pthread_kill',\n r'^__run_exit_handlers',\n r'^__rust_try',\n r'^__sanitizer::',\n r'^__sanitizer_',\n r'^__tsan::',\n r'^__tsan_',\n r'^__ubsan::',\n r'^__ubsan_',\n r'^_asan_',\n r'^_hwasan_',\n r'^_lsan_',\n r'^_msan_',\n r'^_objc_terminate',\n r'^_sanitizer_',\n r'^_start',\n r'^_tsan_',\n r'^_ubsan_',\n r'^abort',\n r'^alloc::',\n r'^android\\.app\\.ActivityManagerProxy\\.',\n r'^android\\.os\\.Parcel\\.',\n r'^art::Thread::CreateNativeThread',\n r'^asan_',\n r'^asan\\.module_ctor',\n r'^asan\\.module_dtor',\n r'^calloc',\n r'^check_memory_region',\n r'^common_exit',\n r'^core::fmt::write',\n r'^delete',\n r'^demangling_terminate_handler',\n r'^dump_backtrace',\n r'^dump_stack',\n r'^exit_or_terminate_process',\n r'^fpehandler\\(',\n r'^free',\n r'^fuzzer::',\n r'^g_log',\n r'^generic_cpp_',\n r'^gsignal',\n r'^kasan_',\n r'^libfuzzer_sys::initialize',\n r'^main',\n r'^malloc',\n r'^mozalloc_',\n r'^new',\n r'^object_err',\n r'^operator',\n r'^panic_abort::',\n r'^print_trailer',\n r'^realloc',\n r'^rust_begin_unwind',\n r'^rust_fuzzer_test_input',\n r'^rust_oom',\n r'^rust_panic',\n r'^scanf',\n r'^show_stack',\n r'^std::__terminate',\n r'^std::io::Write::write_fmt',\n r'^std::panic',\n r'^std::process::abort',\n r'^std::sys::unix::abort',\n r'^std::sys_common::backtrace',\n r'^__rust_start_panic',\n r'^__scrt_common_main_seh',\n r'^libgcc_s.so.*',\n\n # Functions names (contains).\n r'.*ASAN_OnSIGSEGV',\n r'.*BaseThreadInitThunk',\n r'.*DebugBreak',\n r'.*DefaultDcheckHandler',\n r'.*ForceCrashOnSigAbort',\n r'.*MemoryProtection::CMemoryProtector',\n r'.*PartitionAlloc',\n r'.*RtlFreeHeap',\n r'.*RtlInitializeExceptionChain',\n r'.*RtlReportCriticalFailure',\n r'.*RtlUserThreadStart',\n r'.*RtlpHeapHandleError',\n r'.*RtlpLogHeapFailure',\n r'.*SkDebugf',\n r'.*StackDumpSignalHandler',\n r'.*__android_log_assert',\n r'.*__tmainCRTStartup',\n r'.*_asan_rtl_',\n r'.*agent::asan::',\n r'.*allocator_shim',\n r'.*asan_Heap',\n r'.*asan_check_access',\n r'.*asan_osx_dynamic\\.dylib',\n r'.*assert',\n r'.*base::FuzzedDataProvider',\n r'.*base::allocator',\n r'.*base::android::CheckException',\n r'.*base::debug::BreakDebugger',\n r'.*base::debug::CollectStackTrace',\n r'.*base::debug::StackTrace::StackTrace',\n r'.*ieee754\\-',\n r'.*libpthread',\n r'.*logger',\n r'.*logging::CheckError',\n r'.*logging::ErrnoLogMessage',\n r'.*logging::LogMessage',\n r'.*stdext::exception::what',\n r'.*v8::base::OS::Abort',\n\n # File paths.\n r'.* base/callback',\n r'.* /rust(|c)/',\n r'.*/AOSP\\-toolchain/',\n r'.*/bindings/ToV8\\.h',\n r'.*/crosstool/',\n r'.*/gcc/',\n r'.*/glibc\\-',\n r'.*/jemalloc/',\n r'.*/libc\\+\\+',\n r'.*/libc/',\n r'.*/llvm\\-build/',\n r'.*/minkernel/crts/',\n 
r'.*/sanitizer_common/',\n r'.*/tcmalloc/',\n r'.*/vc/include/',\n r'.*/vctools/crt/',\n r'.*/win_toolchain/',\n r'.*libc\\+\\+/',\n\n # Wrappers from honggfuzz/libhfuzz/memorycmp.c.\n r'.*/memorycmp\\.c',\n\n # Others (uncategorized).\n r'.*\\+Unknown',\n r'.*<unknown module>',\n r'.*Inline Function @',\n r'^<unknown>$',\n r'^\\[vdso\\]$',\n r'^linux-gate.so.*$',\n\n # Golang specific frames to ignore.\n r'^panic$',\n r'^runtime\\.',\n\n # Fuchsia specific.\n r'^CrashTrampolineAsm',\n r'^libc_io_functions_not_implemented_use_fdio_instead',\n r'^<libclang_rt.asan.so>',\n r'^__zx_panic',\n r'^syslog::LogMessage',\n\n # Android kernel stack frame ignores.\n r'^print_address_description',\n r'^_etext',\n\n # Swift specific.\n r'^_swift_stdlib_',\n\n # googlefuzztest specific.\n r'.*fuzztest::internal::',\n\n # V8 specific.\n r'^V8_Fatal',\n # Ignore error-throwing frames, the bug is in the caller.\n r'^blink::ReportV8FatalError',\n r'^v8::api_internal::ToLocalEmpty',\n]\n\nSTACK_FRAME_IGNORE_REGEXES_IF_SYMBOLIZED = [\n r'.*libc\\.so',\n r'.*libc\\+\\+\\.so',\n r'.*libc\\+\\+_shared\\.so',\n r'.*libstdc\\+\\+\\.so',\n r'.*libc-.*\\.so',\n]\n\nIGNORE_CRASH_TYPES_FOR_ABRT_BREAKPOINT_AND_ILLS = [\n 'Arbitrary file open',\n 'ASSERT',\n 'CHECK failure',\n 'Command injection',\n 'DCHECK failure',\n 'Fatal error',\n 'Security CHECK failure',\n 'Security DCHECK failure',\n 'V8 API error',\n]\n\nSTATE_STOP_MARKERS = [\n 'Direct leak of',\n 'Uninitialized value was stored to memory at',\n 'allocated by thread',\n 'created by main thread at',\n 'located in stack of thread',\n 'previously allocated by',\n]\n\nUBSAN_CRASH_TYPES_MAP = [\n (UBSAN_DIVISION_BY_ZERO_REGEX, 'Divide-by-zero'),\n (UBSAN_FLOAT_CAST_OVERFLOW_REGEX, 'Float-cast-overflow'),\n (UBSAN_INCORRECT_FUNCTION_POINTER_REGEX, 'Incorrect-function-pointer-type'),\n (UBSAN_INDEX_OOB_REGEX, 'Index-out-of-bounds'),\n (UBSAN_INVALID_BOOL_VALUE_REGEX, 'Invalid-bool-value'),\n (UBSAN_INVALID_BUILTIN_REGEX, 'Invalid-builtin-use'),\n (UBSAN_MISALIGNED_ADDRESS_REGEX, 'Misaligned-address'),\n (UBSAN_NO_RETURN_VALUE_REGEX, 'No-return-value'),\n (UBSAN_NULL_ARGUMENT_REGEX, 'Invalid-null-argument'),\n (UBSAN_NULL_POINTER_READ_REGEX, 'Null-dereference READ'),\n (UBSAN_NULL_POINTER_REFERENCE_REGEX, 'Null-dereference'),\n (UBSAN_NULL_POINTER_WRITE_REGEX, 'Null-dereference WRITE'),\n (UBSAN_OBJECT_SIZE_REGEX, 'Object-size'),\n (UBSAN_POINTER_OVERFLOW_REGEX, 'Pointer-overflow'),\n (UBSAN_RETURNS_NONNULL_ATTRIBUTE_REGEX, 'Invalid-null-return'),\n (UBSAN_SHIFT_ERROR_REGEX, 'Undefined-shift'),\n (UBSAN_UNREACHABLE_REGEX, 'Unreachable code'),\n (UBSAN_UNSIGNED_INTEGER_OVERFLOW_REGEX, 'Unsigned-integer-overflow'),\n (UBSAN_VLA_BOUND_REGEX, 'Non-positive-vla-bound-value'),\n\n # The following types are supersets of other types, and should be placed\n # at the end to avoid subsuming crashes from the more specialized types.\n (UBSAN_INVALID_ENUM_VALUE_REGEX, 'Invalid-enum-value'),\n (UBSAN_INTEGER_OVERFLOW_REGEX, 'Integer-overflow'),\n]\n\n# Additional regexes for cleaning up format.\nSTRIP_STRUCTURE_REGEXES = [\n re.compile(r'^in (.*)'), # sanitizers have prefix for function if present\n re.compile(r'^\\((.*)\\)$'), # sanitizers wrap module if no function\n]\n\n# Other constants.\nLINE_LENGTH_CAP = 80\nMAX_CRASH_STATE_FRAMES = 3\nMAX_CYCLE_LENGTH = 10\nREPEATED_CYCLE_COUNT = 3\n\n# Stackframe format specifications.\nCHROME_STACK_FRAME_SPEC = stack_parser.StackFrameSpec(\n address=3, function_name=4)\nCHROME_WIN_STACK_FRAME_SPEC = stack_parser.StackFrameSpec(\n 
function_name=1,\n function_base=2,\n function_offset=3,\n filename=5,\n fileline=6,\n base=10)\nCHROME_MAC_STACK_FRAME_SPEC = stack_parser.StackFrameSpec(\n address=5, function_name=6, function_offset=7, module_name=2, base=10)\nSAN_STACK_FRAME_SPEC = stack_parser.StackFrameSpec(\n address=2,\n function_name=[7, 5, 23],\n function_offset=8,\n filename=[12, 16],\n fileline=[13, 17],\n module_name=[19, 31],\n module_offset=[21, 32])\nWINDOWS_CDB_STACK_FRAME_SPEC = stack_parser.StackFrameSpec(\n address=1, function_name=4, function_offset=5, module_name=3)\n",
"path": "src/clusterfuzz/stacktraces/constants.py"
}
] | diff --git a/src/clusterfuzz/_internal/tests/core/crash_analysis/stack_parsing/stack_analyzer_data/libgcc_s.txt b/src/clusterfuzz/_internal/tests/core/crash_analysis/stack_parsing/stack_analyzer_data/libgcc_s.txt
new file mode 100644
index 0000000000..7bfb2326d7
--- /dev/null
+++ b/src/clusterfuzz/_internal/tests/core/crash_analysis/stack_parsing/stack_analyzer_data/libgcc_s.txt
@@ -0,0 +1,18 @@
+[Environment] ASAN_OPTIONS=alloc_dealloc_mismatch=0:allocator_may_return_null=1:allow_user_segv_handler=1:check_malloc_usable_size=0:detect_leaks=1:detect_odr_violation=0:detect_stack_use_after_return=1:external_symbolizer_path=/mnt/scratch0/clusterfuzz/resources/platform/linux/llvm-symbolizer:fast_unwind_on_fatal=1:handle_abort=1:handle_segv=1:handle_sigbus=1:handle_sigfpe=1:handle_sigill=1:handle_sigtrap=1:max_uar_stack_size_log=16:print_scariness=1:print_summary=1:print_suppressions=0:redzone=128:strict_memcmp=0:symbolize=1:symbolize_inline_frames=false:use_sigaltstack=1
+[Command line] /mnt/scratch0/clusterfuzz/bot/builds/v8-asan_linux-debug_1f17dda3b0e56007440db98eafbaad9618b3d0fa/revisions/d8-asan-linux-debug-v8-component-84149/d8 --random-seed=-926720076 --fuzzing --fuzzing --disable-abortjs --disable-in-process-stack-traces --simulate-errors --no-enable-sse4_2 --interrupt-budget=1000 --fuzzing /mnt/scratch0/clusterfuzz/bot/inputs/fuzzer-testcases/fuzz-00240.js
++----------------------------------------Debug Build Stacktrace----------------------------------------+
+#
+# Fatal error in ../../src/d8/d8.cc, line 3943
+# Fake error.
+#
+#
+#
+#FailureMessage Object: 0x7f080986b060AddressSanitizer:DEADLYSIGNAL
+=================================================================
+==3592732==ERROR: AddressSanitizer: SEGV on unknown address 0x7df7ff9da4bf (pc 0x7f0809f0dc50 bp 0x7ffebecdd410 sp 0x7ffebecdd2c0 T0)
+==3592732==The signal is caused by a READ memory access.
+SCARINESS: 20 (wild-addr-read)
+ #0 0x7f0809f0dc50 in libgcc_s.so.1
+AddressSanitizer can not provide additional info.
+SUMMARY: AddressSanitizer: SEGV (/lib/x86_64-linux-gnu/libgcc_s.so.1+0xfc50) (BuildId: 4abd133cc80e01bb388a9c42d9e3cb338836544a)
+==3592732==ABORTING
diff --git a/src/clusterfuzz/_internal/tests/core/crash_analysis/stack_parsing/stack_analyzer_test.py b/src/clusterfuzz/_internal/tests/core/crash_analysis/stack_parsing/stack_analyzer_test.py
index 82c7521556..06f4b625f5 100644
--- a/src/clusterfuzz/_internal/tests/core/crash_analysis/stack_parsing/stack_analyzer_test.py
+++ b/src/clusterfuzz/_internal/tests/core/crash_analysis/stack_parsing/stack_analyzer_test.py
@@ -3374,3 +3374,15 @@ def test_go_braces(self):
self._validate_get_crash_data(data, expected_type, expected_address,
expected_state, expected_stacktrace,
expected_security_flag)
+
+ def test_ignore_libgcc_s(self):
+ """Test ignore libgcc_s.so.1"""
+ data = self._read_test_data('libgcc_s.txt')
+ expected_type = 'UNKNOWN READ'
+ expected_state = 'NULL'
+ expected_address = '0x7df7ff9da4bf'
+ expected_stacktrace = data
+ expected_security_flag = True
+ self._validate_get_crash_data(data, expected_type, expected_address,
+ expected_state, expected_stacktrace,
+ expected_security_flag)
diff --git a/src/clusterfuzz/stacktraces/constants.py b/src/clusterfuzz/stacktraces/constants.py
index 14bb717afe..2df512e20d 100644
--- a/src/clusterfuzz/stacktraces/constants.py
+++ b/src/clusterfuzz/stacktraces/constants.py
@@ -469,6 +469,7 @@
r'^std::sys_common::backtrace',
r'^__rust_start_panic',
r'^__scrt_common_main_seh',
+ r'^libgcc_s.so.*',
# Functions names (contains).
r'.*ASAN_OnSIGSEGV',
|
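The record above boils down to one change: frames inside libgcc_s.so are added to STACK_FRAME_IGNORE_REGEXES, so a crash whose only frame is libgcc_s.so.1 falls back to the crash state 'NULL' (as the new test_ignore_libgcc_s asserts). A minimal sketch of how such an ignore list is applied when building a crash state -- illustrative only, not ClusterFuzz's actual stack parser:

    import re

    # Hypothetical two-entry subset of STACK_FRAME_IGNORE_REGEXES;
    # the second pattern is the one this PR adds.
    IGNORE_PATTERNS = [re.compile(p) for p in (
        r'^__libc_',
        r'^libgcc_s.so.*',
    )]

    def crash_state(frames, max_frames=3):
        # Keep the top non-ignored frames (max_frames mirrors
        # MAX_CRASH_STATE_FRAMES = 3); fall back to 'NULL' if none survive.
        kept = [f for f in frames
                if not any(p.match(f) for p in IGNORE_PATTERNS)]
        return '\n'.join(kept[:max_frames]) or 'NULL'

    # The stacktrace in libgcc_s.txt has a single frame, so state is NULL.
    print(crash_state(['libgcc_s.so.1']))                       # NULL
    print(crash_state(['libgcc_s.so.1', 'v8::internal::Heap']))  # v8::internal::Heap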
statsmodels__statsmodels-1027 | add_constant incorrectly detects constant column
statsmodels/statsmodels/tools/tools.py:245 checks for columns with unit variance, not zero variance, when looking for constant columns. Any z-scored data will, of course, have unit variance, so add_constant wrongly concludes a constant is already present and skips adding one. The line should be
if np.any(data.var(0) == 0):
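The misdetection is easy to demonstrate with a toy column (made-up data, not from the report): any column whose variance happens to be exactly 1 trips the == 1 test in the _dataframe_add_constant helper shown in the file dump below, even though the column is not constant.

    import numpy as np

    # A non-constant column with exactly unit variance: mean 0, squares all 1.
    X = np.array([[-1.], [1.], [-1.], [1.]])
    print(X.var(0))               # [ 1.]

    # Old check: fires on unit variance, so add_constant would skip adding
    # the intercept even though no constant column exists.
    print(np.any(X.var(0) == 1))  # True  (wrong)

    # Proposed check: looks for zero variance, i.e. an actual constant column.
    print(np.any(X.var(0) == 0))  # False (correct)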
| [
{
"content": "'''\nUtility functions models code\n'''\n\nimport numpy as np\nimport numpy.lib.recfunctions as nprf\nimport numpy.linalg as L\nfrom scipy.interpolate import interp1d\nfrom scipy.linalg import svdvals\nfrom statsmodels.distributions import (ECDF, monotone_fn_inverter,\n StepFunction)\nfrom statsmodels.tools.data import _is_using_pandas\nfrom statsmodels.compatnp.py3k import asstr2\nfrom pandas import DataFrame\n\ndef _make_dictnames(tmp_arr, offset=0):\n \"\"\"\n Helper function to create a dictionary mapping a column number\n to the name in tmp_arr.\n \"\"\"\n col_map = {}\n for i,col_name in enumerate(tmp_arr):\n col_map.update({i+offset : col_name})\n return col_map\n\ndef drop_missing(Y,X=None, axis=1):\n \"\"\"\n Returns views on the arrays Y and X where missing observations are dropped.\n\n Y : array-like\n X : array-like, optional\n axis : int\n Axis along which to look for missing observations. Default is 1, ie.,\n observations in rows.\n\n Returns\n -------\n Y : array\n All Y where the\n X : array\n\n Notes\n -----\n If either Y or X is 1d, it is reshaped to be 2d.\n \"\"\"\n Y = np.asarray(Y)\n if Y.ndim == 1:\n Y = Y[:,None]\n if X is not None:\n X = np.array(X)\n if X.ndim == 1:\n X = X[:,None]\n keepidx = np.logical_and(~np.isnan(Y).any(axis),~np.isnan(X).any(axis))\n return Y[keepidx], X[keepidx]\n else:\n keepidx = ~np.isnan(Y).any(axis)\n return Y[keepidx]\n\n#TODO: needs to better preserve dtype and be more flexible\n# ie., if you still have a string variable in your array you don't\n# want to cast it to float\n#TODO: add name validator (ie., bad names for datasets.grunfeld)\ndef categorical(data, col=None, dictnames=False, drop=False, ):\n '''\n Returns a dummy matrix given an array of categorical variables.\n\n Parameters\n ----------\n data : array\n A structured array, recarray, or array. This can be either\n a 1d vector of the categorical variable or a 2d array with\n the column specifying the categorical variable specified by the col\n argument.\n col : 'string', int, or None\n If data is a structured array or a recarray, `col` can be a string\n that is the name of the column that contains the variable. For all\n arrays `col` can be an int that is the (zero-based) column index\n number. `col` can only be None for a 1d array. The default is None.\n dictnames : bool, optional\n If True, a dictionary mapping the column number to the categorical\n name is returned. Used to have information about plain arrays.\n drop : bool\n Whether or not keep the categorical variable in the returned matrix.\n\n Returns\n --------\n dummy_matrix, [dictnames, optional]\n A matrix of dummy (indicator/binary) float variables for the\n categorical data. If dictnames is True, then the dictionary\n is returned as well.\n\n Notes\n -----\n This returns a dummy variable for EVERY distinct variable. If a\n a structured or recarray is provided, the names for the new variable is the\n old variable name - underscore - category name. So if the a variable\n 'vote' had answers as 'yes' or 'no' then the returned array would have to\n new variables-- 'vote_yes' and 'vote_no'. 
There is currently\n no name checking.\n\n Examples\n --------\n >>> import numpy as np\n >>> import statsmodels.api as sm\n\n Univariate examples\n\n >>> import string\n >>> string_var = [string.lowercase[0:5], string.lowercase[5:10], \\\n string.lowercase[10:15], string.lowercase[15:20], \\\n string.lowercase[20:25]]\n >>> string_var *= 5\n >>> string_var = np.asarray(sorted(string_var))\n >>> design = sm.tools.categorical(string_var, drop=True)\n\n Or for a numerical categorical variable\n\n >>> instr = np.floor(np.arange(10,60, step=2)/10)\n >>> design = sm.tools.categorical(instr, drop=True)\n\n With a structured array\n\n >>> num = np.random.randn(25,2)\n >>> struct_ar = np.zeros((25,1), dtype=[('var1', 'f4'),('var2', 'f4'), \\\n ('instrument','f4'),('str_instr','a5')])\n >>> struct_ar['var1'] = num[:,0][:,None]\n >>> struct_ar['var2'] = num[:,1][:,None]\n >>> struct_ar['instrument'] = instr[:,None]\n >>> struct_ar['str_instr'] = string_var[:,None]\n >>> design = sm.tools.categorical(struct_ar, col='instrument', drop=True)\n\n Or\n\n >>> design2 = sm.tools.categorical(struct_ar, col='str_instr', drop=True)\n '''\n if isinstance(col, (list, tuple)):\n try:\n assert len(col) == 1\n col = col[0]\n except:\n raise ValueError(\"Can only convert one column at a time\")\n\n #TODO: add a NameValidator function\n # catch recarrays and structured arrays\n if data.dtype.names or data.__class__ is np.recarray:\n if not col and np.squeeze(data).ndim > 1:\n raise IndexError(\"col is None and the input array is not 1d\")\n if isinstance(col, int):\n col = data.dtype.names[col]\n if col is None and data.dtype.names and len(data.dtype.names) == 1:\n col = data.dtype.names[0]\n\n tmp_arr = np.unique(data[col])\n\n # if the cols are shape (#,) vs (#,1) need to add an axis and flip\n _swap = True\n if data[col].ndim == 1:\n tmp_arr = tmp_arr[:,None]\n _swap = False\n tmp_dummy = (tmp_arr==data[col]).astype(float)\n if _swap:\n tmp_dummy = np.squeeze(tmp_dummy).swapaxes(1,0)\n\n if not tmp_arr.dtype.names: # how do we get to this code path?\n tmp_arr = [asstr2(item) for item in np.squeeze(tmp_arr)]\n elif tmp_arr.dtype.names:\n tmp_arr = [asstr2(item) for item in np.squeeze(tmp_arr.tolist())]\n\n# prepend the varname and underscore, if col is numeric attribute lookup\n# is lost for recarrays...\n if col is None:\n try:\n col = data.dtype.names[0]\n except:\n col = 'var'\n#TODO: the above needs to be made robust because there could be many\n# var_yes, var_no varaibles for instance.\n tmp_arr = [col + '_'+ item for item in tmp_arr]\n#TODO: test this for rec and structured arrays!!!\n\n if drop is True:\n if len(data.dtype) <= 1:\n if tmp_dummy.shape[0] < tmp_dummy.shape[1]:\n tmp_dummy = np.squeeze(tmp_dummy).swapaxes(1,0)\n dt = zip(tmp_arr, [tmp_dummy.dtype.str]*len(tmp_arr))\n # preserve array type\n return np.array(map(tuple, tmp_dummy.tolist()),\n dtype=dt).view(type(data))\n\n data=nprf.drop_fields(data, col, usemask=False,\n asrecarray=type(data) is np.recarray)\n data=nprf.append_fields(data, tmp_arr, data=tmp_dummy,\n usemask=False, asrecarray=type(data) is np.recarray)\n return data\n\n # handle ndarrays and catch array-like for an error\n elif data.__class__ is np.ndarray or not isinstance(data,np.ndarray):\n if not isinstance(data, np.ndarray):\n raise NotImplementedError(\"Array-like objects are not supported\")\n\n if isinstance(col, int):\n offset = data.shape[1] # need error catching here?\n tmp_arr = np.unique(data[:,col])\n tmp_dummy = (tmp_arr[:,np.newaxis]==data[:,col]).astype(float)\n 
tmp_dummy = tmp_dummy.swapaxes(1,0)\n if drop is True:\n offset -= 1\n data = np.delete(data, col, axis=1).astype(float)\n data = np.column_stack((data,tmp_dummy))\n if dictnames is True:\n col_map = _make_dictnames(tmp_arr, offset)\n return data, col_map\n return data\n elif col is None and np.squeeze(data).ndim == 1:\n tmp_arr = np.unique(data)\n tmp_dummy = (tmp_arr[:,None]==data).astype(float)\n tmp_dummy = tmp_dummy.swapaxes(1,0)\n if drop is True:\n if dictnames is True:\n col_map = _make_dictnames(tmp_arr)\n return tmp_dummy, col_map\n return tmp_dummy\n else:\n data = np.column_stack((data, tmp_dummy))\n if dictnames is True:\n col_map = _make_dictnames(tmp_arr, offset=1)\n return data, col_map\n return data\n else:\n raise IndexError(\"The index %s is not understood\" % col)\n\ndef _series_add_constant(data, prepend):\n const = np.ones_like(data)\n const.name = 'const'\n if not prepend:\n results = DataFrame([data, const]).T\n results.columns = [data.name, 'const']\n else:\n results = DataFrame([const, data]).T\n results.columns = ['const', data.name]\n return results\n\ndef _dataframe_add_constant(data, prepend):\n # check for const.\n if np.any(data.var(0) == 1):\n return data\n if prepend:\n data.insert(0, 'const', 1)\n else:\n data['const'] = 1\n return data\n\ndef _pandas_add_constant(data, prepend):\n from pandas import Series\n if isinstance(data, Series):\n return _series_add_constant(data, prepend)\n else:\n return _dataframe_add_constant(data, prepend)\n\n\n#TODO: add an axis argument to this for sysreg\ndef add_constant(data, prepend=True):\n '''\n This appends a column of ones to an array if prepend==False.\n\n For ndarrays and pandas.DataFrames, checks to make sure a constant is not\n already included. If there is at least one column of ones then the\n original object is returned. Does not check for a constant if a structured\n or recarray is\n given.\n\n Parameters\n ----------\n data : array-like\n `data` is the column-ordered design matrix\n prepend : bool\n True and the constant is prepended rather than appended.\n\n Returns\n -------\n data : array\n The original array with a constant (column of ones) as the first or\n last column.\n '''\n if _is_using_pandas(data, None):\n # work on a copy\n return _pandas_add_constant(data.copy(), prepend)\n else:\n data = np.asarray(data)\n if not data.dtype.names:\n var0 = data.var(0) == 0\n if np.any(var0):\n return data\n data = np.column_stack((data, np.ones((data.shape[0], 1))))\n if prepend:\n return np.roll(data, 1, 1)\n else:\n return_rec = data.__class__ is np.recarray\n if prepend:\n ones = np.ones((data.shape[0], 1), dtype=[('const', float)])\n data = nprf.append_fields(ones, data.dtype.names, [data[i] for\n i in data.dtype.names], usemask=False, asrecarray=return_rec)\n else:\n data = nprf.append_fields(data, 'const', np.ones(data.shape[0]),\n usemask=False, asrecarray = return_rec)\n return data\n\n\ndef isestimable(C, D):\n \"\"\" True if (Q, P) contrast `C` is estimable for (N, P) design `D`\n\n From an Q x P contrast matrix `C` and an N x P design matrix `D`, checks if\n the contrast `C` is estimable by looking at the rank of ``vstack([C,D])``\n and verifying it is the same as the rank of `D`.\n\n Parameters\n ----------\n C : (Q, P) array-like\n contrast matrix. If `C` has is 1 dimensional assume shape (1, P)\n D: (N, P) array-like\n design matrix\n\n Returns\n -------\n tf : bool\n True if the contrast `C` is estimable on design `D`\n\n Examples\n --------\n >>> D = np.array([[1, 1, 1, 0, 0, 0],\n ... 
[0, 0, 0, 1, 1, 1],\n ... [1, 1, 1, 1, 1, 1]]).T\n >>> isestimable([1, 0, 0], D)\n False\n >>> isestimable([1, -1, 0], D)\n True\n \"\"\"\n C = np.asarray(C)\n D = np.asarray(D)\n if C.ndim == 1:\n C = C[None, :]\n if C.shape[1] != D.shape[1]:\n raise ValueError('Contrast should have %d columns' % D.shape[1])\n new = np.vstack([C, D])\n if rank(new) != rank(D):\n return False\n return True\n\n\ndef recipr(X):\n \"\"\"\n Return the reciprocal of an array, setting all entries less than or\n equal to 0 to 0. Therefore, it presumes that X should be positive in\n general.\n \"\"\"\n x = np.maximum(np.asarray(X).astype(np.float64), 0)\n return np.greater(x, 0.) / (x + np.less_equal(x, 0.))\n\ndef recipr0(X):\n \"\"\"\n Return the reciprocal of an array, setting all entries equal to 0\n as 0. It does not assume that X should be positive in\n general.\n \"\"\"\n test = np.equal(np.asarray(X), 0)\n return np.where(test, 0, 1. / X)\n\ndef clean0(matrix):\n \"\"\"\n Erase columns of zeros: can save some time in pseudoinverse.\n \"\"\"\n colsum = np.add.reduce(matrix**2, 0)\n val = [matrix[:,i] for i in np.flatnonzero(colsum)]\n return np.array(np.transpose(val))\n\ndef rank(X, cond=1.0e-12):\n \"\"\"\n Return the rank of a matrix X based on its generalized inverse,\n not the SVD.\n \"\"\"\n X = np.asarray(X)\n if len(X.shape) == 2:\n D = svdvals(X)\n return int(np.add.reduce(np.greater(D / D.max(), cond).astype(np.int32)))\n else:\n return int(not np.alltrue(np.equal(X, 0.)))\n\ndef fullrank(X, r=None):\n \"\"\"\n Return a matrix whose column span is the same as X.\n\n If the rank of X is known it can be specified as r -- no check\n is made to ensure that this really is the rank of X.\n\n \"\"\"\n\n if r is None:\n r = rank(X)\n\n V, D, U = L.svd(X, full_matrices=0)\n order = np.argsort(D)\n order = order[::-1]\n value = []\n for i in range(r):\n value.append(V[:,order[i]])\n return np.asarray(np.transpose(value)).astype(np.float64)\n\nStepFunction = np.deprecate(StepFunction,\n old_name = 'statsmodels.tools.tools.StepFunction',\n new_name = 'statsmodels.distributions.StepFunction')\nmonotone_fn_inverter = np.deprecate(monotone_fn_inverter,\n old_name = 'statsmodels.tools.tools.monotone_fn_inverter',\n new_name = 'statsmodels.distributions.monotone_fn_inverter')\nECDF = np.deprecate(ECDF,\n old_name = 'statsmodels.tools.tools.ECDF',\n new_name = 'statsmodels.distributions.ECDF')\n\n\ndef unsqueeze(data, axis, oldshape):\n \"\"\"\n Unsqueeze a collapsed array\n\n >>> from numpy import mean\n >>> from numpy.random import standard_normal\n >>> x = standard_normal((3,4,5))\n >>> m = mean(x, axis=1)\n >>> m.shape\n (3, 5)\n >>> m = unsqueeze(m, 1, x.shape)\n >>> m.shape\n (3, 1, 5)\n >>>\n \"\"\"\n newshape = list(oldshape)\n newshape[axis] = 1\n return data.reshape(newshape)\n\ndef chain_dot(*arrs):\n \"\"\"\n Returns the dot product of the given matrices.\n\n Parameters\n ----------\n arrs: argument list of ndarray\n\n Returns\n -------\n Dot product of all arguments.\n\n Example\n -------\n >>> import numpy as np\n >>> from statsmodels.tools import chain_dot\n >>> A = np.arange(1,13).reshape(3,4)\n >>> B = np.arange(3,15).reshape(4,3)\n >>> C = np.arange(5,8).reshape(3,1)\n >>> chain_dot(A,B,C)\n array([[1820],\n [4300],\n [6780]])\n \"\"\"\n return reduce(lambda x, y: np.dot(y, x), arrs[::-1])\n\ndef webuse(data, baseurl='http://www.stata-press.com/data/r11/', as_df=True):\n \"\"\"\n Parameters\n ----------\n data : str\n Name of dataset to fetch.\n baseurl : str\n The base URL to the stata 
datasets.\n as_df : bool\n If True, returns a `pandas.DataFrame`\n\n Returns\n -------\n dta : Record Array\n A record array containing the Stata dataset.\n\n Examples\n --------\n >>> dta = webuse('auto')\n\n Notes\n -----\n Make sure baseurl has trailing forward slash. Doesn't do any\n error checking in response URLs.\n \"\"\"\n # lazy imports\n from statsmodels.iolib import genfromdta\n from urllib2 import urlopen\n from urlparse import urljoin\n from StringIO import StringIO\n\n url = urljoin(baseurl, data+'.dta')\n dta = urlopen(url)\n #TODO: this isn't Python 3 compatibile since urlopen returns bytes?\n dta = StringIO(dta.read()) # make it truly file-like\n if as_df: # could make this faster if we don't process dta twice?\n from pandas import DataFrame\n return DataFrame.from_records(genfromdta(dta))\n else:\n return genfromdta(dta)\n\ndef nan_dot(A, B):\n \"\"\"\n Returns np.dot(left_matrix, right_matrix) with the convention that\n nan * 0 = 0 and nan * x = nan if x != 0.\n\n Parameters\n ----------\n A, B : np.ndarrays\n \"\"\"\n # Find out who should be nan due to nan * nonzero\n should_be_nan_1 = np.dot(np.isnan(A), (B != 0))\n should_be_nan_2 = np.dot((A != 0), np.isnan(B))\n should_be_nan = should_be_nan_1 + should_be_nan_2\n\n # Multiply after setting all nan to 0\n # This is what happens if there were no nan * nonzero conflicts\n C = np.dot(np.nan_to_num(A), np.nan_to_num(B))\n\n C[should_be_nan] = np.nan\n\n return C\n\ndef maybe_unwrap_results(results):\n \"\"\"\n Gets raw results back from wrapped results.\n\n Can be used in plotting functions or other post-estimation type\n routines.\n \"\"\"\n return getattr(results, '_results', results)\n",
"path": "statsmodels/tools/tools.py"
}
] | [
{
"content": "'''\nUtility functions models code\n'''\n\nimport numpy as np\nimport numpy.lib.recfunctions as nprf\nimport numpy.linalg as L\nfrom scipy.interpolate import interp1d\nfrom scipy.linalg import svdvals\nfrom statsmodels.distributions import (ECDF, monotone_fn_inverter,\n StepFunction)\nfrom statsmodels.tools.data import _is_using_pandas\nfrom statsmodels.compatnp.py3k import asstr2\nfrom pandas import DataFrame\n\ndef _make_dictnames(tmp_arr, offset=0):\n \"\"\"\n Helper function to create a dictionary mapping a column number\n to the name in tmp_arr.\n \"\"\"\n col_map = {}\n for i,col_name in enumerate(tmp_arr):\n col_map.update({i+offset : col_name})\n return col_map\n\ndef drop_missing(Y,X=None, axis=1):\n \"\"\"\n Returns views on the arrays Y and X where missing observations are dropped.\n\n Y : array-like\n X : array-like, optional\n axis : int\n Axis along which to look for missing observations. Default is 1, ie.,\n observations in rows.\n\n Returns\n -------\n Y : array\n All Y where the\n X : array\n\n Notes\n -----\n If either Y or X is 1d, it is reshaped to be 2d.\n \"\"\"\n Y = np.asarray(Y)\n if Y.ndim == 1:\n Y = Y[:,None]\n if X is not None:\n X = np.array(X)\n if X.ndim == 1:\n X = X[:,None]\n keepidx = np.logical_and(~np.isnan(Y).any(axis),~np.isnan(X).any(axis))\n return Y[keepidx], X[keepidx]\n else:\n keepidx = ~np.isnan(Y).any(axis)\n return Y[keepidx]\n\n#TODO: needs to better preserve dtype and be more flexible\n# ie., if you still have a string variable in your array you don't\n# want to cast it to float\n#TODO: add name validator (ie., bad names for datasets.grunfeld)\ndef categorical(data, col=None, dictnames=False, drop=False, ):\n '''\n Returns a dummy matrix given an array of categorical variables.\n\n Parameters\n ----------\n data : array\n A structured array, recarray, or array. This can be either\n a 1d vector of the categorical variable or a 2d array with\n the column specifying the categorical variable specified by the col\n argument.\n col : 'string', int, or None\n If data is a structured array or a recarray, `col` can be a string\n that is the name of the column that contains the variable. For all\n arrays `col` can be an int that is the (zero-based) column index\n number. `col` can only be None for a 1d array. The default is None.\n dictnames : bool, optional\n If True, a dictionary mapping the column number to the categorical\n name is returned. Used to have information about plain arrays.\n drop : bool\n Whether or not keep the categorical variable in the returned matrix.\n\n Returns\n --------\n dummy_matrix, [dictnames, optional]\n A matrix of dummy (indicator/binary) float variables for the\n categorical data. If dictnames is True, then the dictionary\n is returned as well.\n\n Notes\n -----\n This returns a dummy variable for EVERY distinct variable. If a\n a structured or recarray is provided, the names for the new variable is the\n old variable name - underscore - category name. So if the a variable\n 'vote' had answers as 'yes' or 'no' then the returned array would have to\n new variables-- 'vote_yes' and 'vote_no'. 
There is currently\n no name checking.\n\n Examples\n --------\n >>> import numpy as np\n >>> import statsmodels.api as sm\n\n Univariate examples\n\n >>> import string\n >>> string_var = [string.lowercase[0:5], string.lowercase[5:10], \\\n string.lowercase[10:15], string.lowercase[15:20], \\\n string.lowercase[20:25]]\n >>> string_var *= 5\n >>> string_var = np.asarray(sorted(string_var))\n >>> design = sm.tools.categorical(string_var, drop=True)\n\n Or for a numerical categorical variable\n\n >>> instr = np.floor(np.arange(10,60, step=2)/10)\n >>> design = sm.tools.categorical(instr, drop=True)\n\n With a structured array\n\n >>> num = np.random.randn(25,2)\n >>> struct_ar = np.zeros((25,1), dtype=[('var1', 'f4'),('var2', 'f4'), \\\n ('instrument','f4'),('str_instr','a5')])\n >>> struct_ar['var1'] = num[:,0][:,None]\n >>> struct_ar['var2'] = num[:,1][:,None]\n >>> struct_ar['instrument'] = instr[:,None]\n >>> struct_ar['str_instr'] = string_var[:,None]\n >>> design = sm.tools.categorical(struct_ar, col='instrument', drop=True)\n\n Or\n\n >>> design2 = sm.tools.categorical(struct_ar, col='str_instr', drop=True)\n '''\n if isinstance(col, (list, tuple)):\n try:\n assert len(col) == 1\n col = col[0]\n except:\n raise ValueError(\"Can only convert one column at a time\")\n\n #TODO: add a NameValidator function\n # catch recarrays and structured arrays\n if data.dtype.names or data.__class__ is np.recarray:\n if not col and np.squeeze(data).ndim > 1:\n raise IndexError(\"col is None and the input array is not 1d\")\n if isinstance(col, int):\n col = data.dtype.names[col]\n if col is None and data.dtype.names and len(data.dtype.names) == 1:\n col = data.dtype.names[0]\n\n tmp_arr = np.unique(data[col])\n\n # if the cols are shape (#,) vs (#,1) need to add an axis and flip\n _swap = True\n if data[col].ndim == 1:\n tmp_arr = tmp_arr[:,None]\n _swap = False\n tmp_dummy = (tmp_arr==data[col]).astype(float)\n if _swap:\n tmp_dummy = np.squeeze(tmp_dummy).swapaxes(1,0)\n\n if not tmp_arr.dtype.names: # how do we get to this code path?\n tmp_arr = [asstr2(item) for item in np.squeeze(tmp_arr)]\n elif tmp_arr.dtype.names:\n tmp_arr = [asstr2(item) for item in np.squeeze(tmp_arr.tolist())]\n\n# prepend the varname and underscore, if col is numeric attribute lookup\n# is lost for recarrays...\n if col is None:\n try:\n col = data.dtype.names[0]\n except:\n col = 'var'\n#TODO: the above needs to be made robust because there could be many\n# var_yes, var_no varaibles for instance.\n tmp_arr = [col + '_'+ item for item in tmp_arr]\n#TODO: test this for rec and structured arrays!!!\n\n if drop is True:\n if len(data.dtype) <= 1:\n if tmp_dummy.shape[0] < tmp_dummy.shape[1]:\n tmp_dummy = np.squeeze(tmp_dummy).swapaxes(1,0)\n dt = zip(tmp_arr, [tmp_dummy.dtype.str]*len(tmp_arr))\n # preserve array type\n return np.array(map(tuple, tmp_dummy.tolist()),\n dtype=dt).view(type(data))\n\n data=nprf.drop_fields(data, col, usemask=False,\n asrecarray=type(data) is np.recarray)\n data=nprf.append_fields(data, tmp_arr, data=tmp_dummy,\n usemask=False, asrecarray=type(data) is np.recarray)\n return data\n\n # handle ndarrays and catch array-like for an error\n elif data.__class__ is np.ndarray or not isinstance(data,np.ndarray):\n if not isinstance(data, np.ndarray):\n raise NotImplementedError(\"Array-like objects are not supported\")\n\n if isinstance(col, int):\n offset = data.shape[1] # need error catching here?\n tmp_arr = np.unique(data[:,col])\n tmp_dummy = (tmp_arr[:,np.newaxis]==data[:,col]).astype(float)\n 
tmp_dummy = tmp_dummy.swapaxes(1,0)\n if drop is True:\n offset -= 1\n data = np.delete(data, col, axis=1).astype(float)\n data = np.column_stack((data,tmp_dummy))\n if dictnames is True:\n col_map = _make_dictnames(tmp_arr, offset)\n return data, col_map\n return data\n elif col is None and np.squeeze(data).ndim == 1:\n tmp_arr = np.unique(data)\n tmp_dummy = (tmp_arr[:,None]==data).astype(float)\n tmp_dummy = tmp_dummy.swapaxes(1,0)\n if drop is True:\n if dictnames is True:\n col_map = _make_dictnames(tmp_arr)\n return tmp_dummy, col_map\n return tmp_dummy\n else:\n data = np.column_stack((data, tmp_dummy))\n if dictnames is True:\n col_map = _make_dictnames(tmp_arr, offset=1)\n return data, col_map\n return data\n else:\n raise IndexError(\"The index %s is not understood\" % col)\n\ndef _series_add_constant(data, prepend):\n const = np.ones_like(data)\n const.name = 'const'\n if not prepend:\n results = DataFrame([data, const]).T\n results.columns = [data.name, 'const']\n else:\n results = DataFrame([const, data]).T\n results.columns = ['const', data.name]\n return results\n\ndef _dataframe_add_constant(data, prepend):\n # check for const.\n if np.any(data.var(0) == 0):\n return data\n if prepend:\n data.insert(0, 'const', 1)\n else:\n data['const'] = 1\n return data\n\ndef _pandas_add_constant(data, prepend):\n from pandas import Series\n if isinstance(data, Series):\n return _series_add_constant(data, prepend)\n else:\n return _dataframe_add_constant(data, prepend)\n\n\n#TODO: add an axis argument to this for sysreg\ndef add_constant(data, prepend=True):\n '''\n This appends a column of ones to an array if prepend==False.\n\n For ndarrays and pandas.DataFrames, checks to make sure a constant is not\n already included. If there is at least one column of ones then the\n original object is returned. Does not check for a constant if a structured\n or recarray is\n given.\n\n Parameters\n ----------\n data : array-like\n `data` is the column-ordered design matrix\n prepend : bool\n True and the constant is prepended rather than appended.\n\n Returns\n -------\n data : array\n The original array with a constant (column of ones) as the first or\n last column.\n '''\n if _is_using_pandas(data, None):\n # work on a copy\n return _pandas_add_constant(data.copy(), prepend)\n else:\n data = np.asarray(data)\n if not data.dtype.names:\n var0 = data.var(0) == 0\n if np.any(var0):\n return data\n data = np.column_stack((data, np.ones((data.shape[0], 1))))\n if prepend:\n return np.roll(data, 1, 1)\n else:\n return_rec = data.__class__ is np.recarray\n if prepend:\n ones = np.ones((data.shape[0], 1), dtype=[('const', float)])\n data = nprf.append_fields(ones, data.dtype.names, [data[i] for\n i in data.dtype.names], usemask=False, asrecarray=return_rec)\n else:\n data = nprf.append_fields(data, 'const', np.ones(data.shape[0]),\n usemask=False, asrecarray = return_rec)\n return data\n\n\ndef isestimable(C, D):\n \"\"\" True if (Q, P) contrast `C` is estimable for (N, P) design `D`\n\n From an Q x P contrast matrix `C` and an N x P design matrix `D`, checks if\n the contrast `C` is estimable by looking at the rank of ``vstack([C,D])``\n and verifying it is the same as the rank of `D`.\n\n Parameters\n ----------\n C : (Q, P) array-like\n contrast matrix. If `C` has is 1 dimensional assume shape (1, P)\n D: (N, P) array-like\n design matrix\n\n Returns\n -------\n tf : bool\n True if the contrast `C` is estimable on design `D`\n\n Examples\n --------\n >>> D = np.array([[1, 1, 1, 0, 0, 0],\n ... 
[0, 0, 0, 1, 1, 1],\n ... [1, 1, 1, 1, 1, 1]]).T\n >>> isestimable([1, 0, 0], D)\n False\n >>> isestimable([1, -1, 0], D)\n True\n \"\"\"\n C = np.asarray(C)\n D = np.asarray(D)\n if C.ndim == 1:\n C = C[None, :]\n if C.shape[1] != D.shape[1]:\n raise ValueError('Contrast should have %d columns' % D.shape[1])\n new = np.vstack([C, D])\n if rank(new) != rank(D):\n return False\n return True\n\n\ndef recipr(X):\n \"\"\"\n Return the reciprocal of an array, setting all entries less than or\n equal to 0 to 0. Therefore, it presumes that X should be positive in\n general.\n \"\"\"\n x = np.maximum(np.asarray(X).astype(np.float64), 0)\n return np.greater(x, 0.) / (x + np.less_equal(x, 0.))\n\ndef recipr0(X):\n \"\"\"\n Return the reciprocal of an array, setting all entries equal to 0\n as 0. It does not assume that X should be positive in\n general.\n \"\"\"\n test = np.equal(np.asarray(X), 0)\n return np.where(test, 0, 1. / X)\n\ndef clean0(matrix):\n \"\"\"\n Erase columns of zeros: can save some time in pseudoinverse.\n \"\"\"\n colsum = np.add.reduce(matrix**2, 0)\n val = [matrix[:,i] for i in np.flatnonzero(colsum)]\n return np.array(np.transpose(val))\n\ndef rank(X, cond=1.0e-12):\n \"\"\"\n Return the rank of a matrix X based on its generalized inverse,\n not the SVD.\n \"\"\"\n X = np.asarray(X)\n if len(X.shape) == 2:\n D = svdvals(X)\n return int(np.add.reduce(np.greater(D / D.max(), cond).astype(np.int32)))\n else:\n return int(not np.alltrue(np.equal(X, 0.)))\n\ndef fullrank(X, r=None):\n \"\"\"\n Return a matrix whose column span is the same as X.\n\n If the rank of X is known it can be specified as r -- no check\n is made to ensure that this really is the rank of X.\n\n \"\"\"\n\n if r is None:\n r = rank(X)\n\n V, D, U = L.svd(X, full_matrices=0)\n order = np.argsort(D)\n order = order[::-1]\n value = []\n for i in range(r):\n value.append(V[:,order[i]])\n return np.asarray(np.transpose(value)).astype(np.float64)\n\nStepFunction = np.deprecate(StepFunction,\n old_name = 'statsmodels.tools.tools.StepFunction',\n new_name = 'statsmodels.distributions.StepFunction')\nmonotone_fn_inverter = np.deprecate(monotone_fn_inverter,\n old_name = 'statsmodels.tools.tools.monotone_fn_inverter',\n new_name = 'statsmodels.distributions.monotone_fn_inverter')\nECDF = np.deprecate(ECDF,\n old_name = 'statsmodels.tools.tools.ECDF',\n new_name = 'statsmodels.distributions.ECDF')\n\n\ndef unsqueeze(data, axis, oldshape):\n \"\"\"\n Unsqueeze a collapsed array\n\n >>> from numpy import mean\n >>> from numpy.random import standard_normal\n >>> x = standard_normal((3,4,5))\n >>> m = mean(x, axis=1)\n >>> m.shape\n (3, 5)\n >>> m = unsqueeze(m, 1, x.shape)\n >>> m.shape\n (3, 1, 5)\n >>>\n \"\"\"\n newshape = list(oldshape)\n newshape[axis] = 1\n return data.reshape(newshape)\n\ndef chain_dot(*arrs):\n \"\"\"\n Returns the dot product of the given matrices.\n\n Parameters\n ----------\n arrs: argument list of ndarray\n\n Returns\n -------\n Dot product of all arguments.\n\n Example\n -------\n >>> import numpy as np\n >>> from statsmodels.tools import chain_dot\n >>> A = np.arange(1,13).reshape(3,4)\n >>> B = np.arange(3,15).reshape(4,3)\n >>> C = np.arange(5,8).reshape(3,1)\n >>> chain_dot(A,B,C)\n array([[1820],\n [4300],\n [6780]])\n \"\"\"\n return reduce(lambda x, y: np.dot(y, x), arrs[::-1])\n\ndef webuse(data, baseurl='http://www.stata-press.com/data/r11/', as_df=True):\n \"\"\"\n Parameters\n ----------\n data : str\n Name of dataset to fetch.\n baseurl : str\n The base URL to the stata 
datasets.\n as_df : bool\n If True, returns a `pandas.DataFrame`\n\n Returns\n -------\n dta : Record Array\n A record array containing the Stata dataset.\n\n Examples\n --------\n >>> dta = webuse('auto')\n\n Notes\n -----\n Make sure baseurl has trailing forward slash. Doesn't do any\n error checking in response URLs.\n \"\"\"\n # lazy imports\n from statsmodels.iolib import genfromdta\n from urllib2 import urlopen\n from urlparse import urljoin\n from StringIO import StringIO\n\n url = urljoin(baseurl, data+'.dta')\n dta = urlopen(url)\n #TODO: this isn't Python 3 compatibile since urlopen returns bytes?\n dta = StringIO(dta.read()) # make it truly file-like\n if as_df: # could make this faster if we don't process dta twice?\n from pandas import DataFrame\n return DataFrame.from_records(genfromdta(dta))\n else:\n return genfromdta(dta)\n\ndef nan_dot(A, B):\n \"\"\"\n Returns np.dot(left_matrix, right_matrix) with the convention that\n nan * 0 = 0 and nan * x = nan if x != 0.\n\n Parameters\n ----------\n A, B : np.ndarrays\n \"\"\"\n # Find out who should be nan due to nan * nonzero\n should_be_nan_1 = np.dot(np.isnan(A), (B != 0))\n should_be_nan_2 = np.dot((A != 0), np.isnan(B))\n should_be_nan = should_be_nan_1 + should_be_nan_2\n\n # Multiply after setting all nan to 0\n # This is what happens if there were no nan * nonzero conflicts\n C = np.dot(np.nan_to_num(A), np.nan_to_num(B))\n\n C[should_be_nan] = np.nan\n\n return C\n\ndef maybe_unwrap_results(results):\n \"\"\"\n Gets raw results back from wrapped results.\n\n Can be used in plotting functions or other post-estimation type\n routines.\n \"\"\"\n return getattr(results, '_results', results)\n",
"path": "statsmodels/tools/tools.py"
}
] | diff --git a/statsmodels/tools/tests/test_tools.py b/statsmodels/tools/tests/test_tools.py
index 68781181f33..a2ae9568124 100644
--- a/statsmodels/tools/tests/test_tools.py
+++ b/statsmodels/tools/tests/test_tools.py
@@ -376,6 +376,8 @@ def test_pandas_const_df():
 
 def test_pandas_const_df_prepend():
     dta = longley.load_pandas().exog
+    # regression test for #1025
+    dta['UNEMP'] /= dta['UNEMP'].std()
     dta = tools.add_constant(dta, prepend=True)
     assert_string_equal('const', dta.columns[0])
     assert_equal(dta.var(0)[0], 0)
diff --git a/statsmodels/tools/tools.py b/statsmodels/tools/tools.py
index 85832d931b1..123404da07c 100644
--- a/statsmodels/tools/tools.py
+++ b/statsmodels/tools/tools.py
@@ -242,7 +242,7 @@ def _series_add_constant(data, prepend):
 
 def _dataframe_add_constant(data, prepend):
     # check for const.
-    if np.any(data.var(0) == 1):
+    if np.any(data.var(0) == 0):
         return data
     if prepend:
         data.insert(0, 'const', 1)
|
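A note on the one-character fix above: `add_constant` treats a column as an existing constant when its variance is zero, but the old check compared the variance to 1, so any column that merely had unit variance (such as the standardized `UNEMP` column added by the regression test) was mistaken for a constant and no intercept was appended. A small self-contained illustration with plain numpy/pandas, assuming nothing beyond the diff:

```python
import numpy as np
import pandas as pd

# One column with sample variance exactly 1.0, and no constant column yet.
df = pd.DataFrame({"x": [0.0, 1.0, 2.0]})

print(np.any(df.var(0) == 1))  # True  -> old check: "constant present", skips adding one
print(np.any(df.var(0) == 0))  # False -> fixed check: no constant column found

df["const"] = 1.0              # add a genuine constant column
print(np.any(df.var(0) == 0))  # True  -> fixed check correctly detects it
```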
nvaccess__nvda-14351 | Needless state announcement on JMenu and JMenuItem (Java Access Bridge)
When opening a JMenu and selecting a JMenuItem (or a subclass of JMenuItem), NVDA announces the AccessibleState "selected" or "enabled". These state announcements are needless on menus and menu items, and they slow down menu exploration.
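To make the mechanism concrete: the state set NVDA reports comes from a role-independent mapping of the Java Access Bridge state string (the full table appears in the `before_files` content below). The following is a minimal, simplified sketch of that mapping, not NVDA's actual code; the string values are illustrative stand-ins for `controlTypes.State` members.

```python
# Minimal sketch (simplified from JABStatesToNVDAStates and _get_states in
# source/NVDAObjects/JAB/__init__.py) of how the comma-separated state string
# reported by Java Access Bridge becomes NVDA's state set.
JAB_STATES_TO_NVDA = {
    "checked": "checked",
    "focused": "focused",
    "selected": "selected",  # mapped for every role, including menu items
    "expanded": "expanded",
}


def announced_states(jab_state_string: str) -> set:
    """Keep every JAB state that has a mapping, regardless of the role."""
    return {
        JAB_STATES_TO_NVDA[s]
        for s in jab_state_string.split(",")
        if s in JAB_STATES_TO_NVDA
    }


# A focused JMenuItem typically reports a state string like this one:
print(announced_states("enabled,focused,selected,showing,visible"))
# -> {'focused', 'selected'}; "selected" is the needless announcement
```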
### Steps to reproduce:
Prerequisites:
You have installed a Java Runtime Environment, e.g. JDK/JRE version 17 or 18.
You have downloaded the SwingSet2 Demonstration Program from: https://docs.oracle.com/javase/tutorial/uiswing/lookandfeel/plaf.html#swingset2
1. Launch SwingSet2.jar with the Java JRE.
The SwingSet2 application window appears.
2. Open the File menu with keystroke Alt+F.
3. Use arrow down key to navigate to the menu items About and Exit.
### Actual behavior:
(I got a German speech output and translated it to English. Maybe the English speech output is different.)
File Menu selected enabled Alt+F
About selected B
Exit selected X
### Expected behavior:
File Menu Alt+F
About B
Exit X
### NVDA logs, crash dumps and other attachments:
### System configuration
#### NVDA installed/portable/running from source:
installed
#### NVDA version:
Version 2022.3.1
#### Windows version:
Microsoft Windows 10 Enterprise
Version 10.0.19044 Build 19044
#### Name and version of other software in use when reproducing the issue:
java version "17.0.4" 2022-07-19 LTS
Java(TM) SE Runtime Environment (build 17.0.4+11-LTS-179)
Java HotSpot(TM) 64-Bit Server VM (build 17.0.4+11-LTS-179, mixed mode, sharing)
#### Other information about your system:
Java Access Bridge is enabled.
### Other questions
#### Does the issue still occur after restarting your computer?
Yes.
#### Have you tried any other versions of NVDA? If so, please report their behaviors.
Same behavior in previous versions of NVDA.
#### If NVDA add-ons are disabled, is your problem still occurring?
Not applicable.
#### Does the issue still occur after you run the COM Registration Fixing Tool in NVDA's tools menu?
Not applicable.
| [
{
"content": "# A part of NonVisual Desktop Access (NVDA)\r\n# Copyright (C) 2006-2022 NV Access Limited, Leonard de Ruijter, Joseph Lee, Renaud Paquay, pvagner\r\n# This file is covered by the GNU General Public License.\r\n# See the file COPYING for more details.\r\n\r\nimport ctypes\r\nimport re\r\nfrom typing import (\r\n\tDict,\r\n)\r\nimport eventHandler\r\nimport keyLabels\r\nimport JABHandler\r\nimport controlTypes\r\nimport textUtils\r\nfrom controlTypes import TextPosition\r\nfrom ..window import Window\r\nfrom ..behaviors import ProgressBar, EditableTextWithoutAutoSelectDetection, Dialog\r\nimport textInfos.offsets\r\nfrom logHandler import log\r\nfrom .. import InvalidNVDAObject\r\nfrom locationHelper import RectLTWH\r\n\r\n\r\nJABRolesToNVDARoles: Dict[str, controlTypes.Role] = {\r\n\t\"alert\": controlTypes.Role.DIALOG,\r\n\t\"column header\": controlTypes.Role.TABLECOLUMNHEADER,\r\n\t\"canvas\": controlTypes.Role.CANVAS,\r\n\t\"combo box\": controlTypes.Role.COMBOBOX,\r\n\t\"desktop icon\": controlTypes.Role.DESKTOPICON,\r\n\t\"internal frame\": controlTypes.Role.INTERNALFRAME,\r\n\t\"desktop pane\": controlTypes.Role.DESKTOPPANE,\r\n\t\"option pane\": controlTypes.Role.OPTIONPANE,\r\n\t\"window\": controlTypes.Role.WINDOW,\r\n\t\"frame\": controlTypes.Role.FRAME,\r\n\t\"dialog\": controlTypes.Role.DIALOG,\r\n\t\"color chooser\": controlTypes.Role.COLORCHOOSER,\r\n\t\"directory pane\": controlTypes.Role.DIRECTORYPANE,\r\n\t\"file chooser\": controlTypes.Role.FILECHOOSER,\r\n\t\"filler\": controlTypes.Role.FILLER,\r\n\t\"hyperlink\": controlTypes.Role.LINK,\r\n\t\"icon\": controlTypes.Role.ICON,\r\n\t\"label\": controlTypes.Role.LABEL,\r\n\t\"root pane\": controlTypes.Role.PANEL,\r\n\t\"glass pane\": controlTypes.Role.PANEL,\r\n\t\"layered pane\": controlTypes.Role.PANEL,\r\n\t\"list\": controlTypes.Role.LIST,\r\n\t\"list item\": controlTypes.Role.LISTITEM,\r\n\t\"menu bar\": controlTypes.Role.MENUBAR,\r\n\t\"popup menu\": controlTypes.Role.POPUPMENU,\r\n\t\"menu\": controlTypes.Role.MENU,\r\n\t\"menu item\": controlTypes.Role.MENUITEM,\r\n\t\"separator\": controlTypes.Role.SEPARATOR,\r\n\t\"page tab list\": controlTypes.Role.TABCONTROL,\r\n\t\"page tab\": controlTypes.Role.TAB,\r\n\t\"panel\": controlTypes.Role.PANEL,\r\n\t\"progress bar\": controlTypes.Role.PROGRESSBAR,\r\n\t\"password text\": controlTypes.Role.PASSWORDEDIT,\r\n\t\"push button\": controlTypes.Role.BUTTON,\r\n\t\"toggle button\": controlTypes.Role.TOGGLEBUTTON,\r\n\t\"check box\": controlTypes.Role.CHECKBOX,\r\n\t\"radio button\": controlTypes.Role.RADIOBUTTON,\r\n\t\"row header\": controlTypes.Role.TABLEROWHEADER,\r\n\t\"scroll pane\": controlTypes.Role.SCROLLPANE,\r\n\t\"scroll bar\": controlTypes.Role.SCROLLBAR,\r\n\t\"view port\": controlTypes.Role.VIEWPORT,\r\n\t\"slider\": controlTypes.Role.SLIDER,\r\n\t\"split pane\": controlTypes.Role.SPLITPANE,\r\n\t\"table\": controlTypes.Role.TABLE,\r\n\t\"text\": controlTypes.Role.EDITABLETEXT,\r\n\t\"tree\": controlTypes.Role.TREEVIEW,\r\n\t\"tool bar\": controlTypes.Role.TOOLBAR,\r\n\t\"tool tip\": controlTypes.Role.TOOLTIP,\r\n\t\"status bar\": controlTypes.Role.STATUSBAR,\r\n\t\"statusbar\": controlTypes.Role.STATUSBAR,\r\n\t\"date editor\": controlTypes.Role.DATEEDITOR,\r\n\t\"spin box\": controlTypes.Role.SPINBUTTON,\r\n\t\"font chooser\": controlTypes.Role.FONTCHOOSER,\r\n\t\"group box\": controlTypes.Role.GROUPING,\r\n\t\"groupbox\": controlTypes.Role.GROUPING,\r\n\t\"header\": controlTypes.Role.HEADER,\r\n\t\"footer\": 
controlTypes.Role.FOOTER,\r\n\t\"paragraph\": controlTypes.Role.PARAGRAPH,\r\n\t\"ruler\": controlTypes.Role.RULER,\r\n\t\"edit bar\": controlTypes.Role.EDITBAR,\r\n}\r\n\r\nJABStatesToNVDAStates={\r\n\t\"busy\":controlTypes.State.BUSY,\r\n\t\"checked\":controlTypes.State.CHECKED,\r\n\t\"focused\":controlTypes.State.FOCUSED,\r\n\t\"selected\":controlTypes.State.SELECTED,\r\n\t\"pressed\":controlTypes.State.PRESSED,\r\n\t\"expanded\":controlTypes.State.EXPANDED,\r\n\t\"collapsed\":controlTypes.State.COLLAPSED,\r\n\t\"iconified\":controlTypes.State.ICONIFIED,\r\n\t\"modal\":controlTypes.State.MODAL,\r\n\t\"multi_line\":controlTypes.State.MULTILINE,\r\n\t\"focusable\":controlTypes.State.FOCUSABLE,\r\n\t\"editable\":controlTypes.State.EDITABLE,\r\n}\r\n\r\n\r\nre_simpleXmlTag = re.compile(r\"(\\<[^>]+\\>)+\")\r\n\r\n\r\ndef _subHtmlTag(match: re.match) -> str:\r\n\t\"\"\" Determines whether to replace the tag with a space or to just remove it. \"\"\"\r\n\tstartIndex, endIndex = match.span()\r\n\treturn \"\" if (\r\n\t\tstartIndex == 0 or match.string[startIndex - 1].isspace()\r\n\t\tor endIndex == len(match.string) or match.string[endIndex].isspace()\r\n\t) else \" \"\r\n\r\n\r\ndef _processHtml(text: str) -> str:\r\n\t\"\"\" Strips HTML tags from text if it is HTML \"\"\"\r\n\treturn re_simpleXmlTag.sub(_subHtmlTag, text) if text.startswith(\"<html>\") else text\r\n\r\n\r\nclass JABTextInfo(textInfos.offsets.OffsetsTextInfo):\r\n\r\n\tdef _getOffsetFromPoint(self,x,y):\r\n\t\tinfo=self.obj.jabContext.getAccessibleTextInfo(x,y)\r\n\t\toffset=max(min(info.indexAtPoint,info.charCount-1),0)\r\n\t\treturn offset\r\n\r\n\tdef _getBoundingRectFromOffset(self, offset):\r\n\t\trect = self.obj.jabContext.getAccessibleTextRect(offset)\r\n\t\ttry:\r\n\t\t\treturn RectLTWH(rect.x, rect.y, rect.width, rect.height).toLTRB()\r\n\t\texcept ValueError:\r\n\t\t\traise LookupError\r\n\r\n\tdef _getCaretOffset(self):\r\n\t\ttextInfo=self.obj.jabContext.getAccessibleTextInfo(self.obj._JABAccContextInfo.x,self.obj._JABAccContextInfo.y)\r\n\t\toffset=textInfo.caretIndex\r\n\t\t# OpenOffice sometimes returns nonsense, so treat charCount < offset as no caret.\r\n\t\tif offset==-1 or textInfo.charCount<offset:\r\n\t\t\traise RuntimeError(\"no available caret in this object\")\r\n\t\treturn offset\r\n\r\n\tdef _setCaretOffset(self,offset):\r\n\t\tself.obj.jabContext.setCaretPosition(offset)\r\n\r\n\tdef _getSelectionOffsets(self):\r\n\t\tinfo=self.obj.jabContext.getAccessibleTextSelectionInfo()\r\n\t\tstart=max(info.selectionStartIndex,0)\r\n\t\tend=max(info.selectionEndIndex,0)\r\n\t\treturn (start,end)\r\n\r\n\tdef _setSelectionOffsets(self,start,end):\r\n\t\tself.obj.jabContext.selectTextRange(start,end)\r\n\r\n\tdef _getStoryLength(self):\r\n\t\tif not hasattr(self,'_storyLength'):\r\n\t\t\ttextInfo=self.obj.jabContext.getAccessibleTextInfo(self.obj._JABAccContextInfo.x,self.obj._JABAccContextInfo.y)\r\n\t\t\tself._storyLength=textInfo.charCount\r\n\t\treturn self._storyLength\r\n\r\n\tdef _getTextRange(self,start,end):\r\n\t\t#Java needs end of range as last character, not one past the last character\r\n\t\ttext=self.obj.jabContext.getAccessibleTextRange(start,end-1)\r\n\t\treturn text\r\n\r\n\tdef _getLineNumFromOffset(self,offset):\r\n\t\treturn None\r\n\r\n\tdef _getLineOffsets(self,offset):\r\n\t\t(start,end)=self.obj.jabContext.getAccessibleTextLineBounds(offset)\r\n\t\tif end==-1 and offset>0:\r\n\t\t\t# #1892: JAB returns -1 for the end insertion position\r\n\t\t\t# instead of returning the offsets for the 
last line.\r\n\t\t\t# Try one character back.\r\n\t\t\t(start,end)=self.obj.jabContext.getAccessibleTextLineBounds(offset-1)\r\n\t\t#Java gives end as the last character, not one past the last character\r\n\t\tend=end+1\r\n\t\treturn (start,end)\r\n\r\n\tdef _getParagraphOffsets(self,offset):\r\n\t\treturn self._getLineOffsets(offset)\r\n\r\n\tdef _getFormatFieldAndOffsets(self, offset, formatConfig, calculateOffsets=True):\r\n\t\tattribs: JABHandler.AccessibleTextAttributesInfo\r\n\t\tattribs, length = self.obj.jabContext.getTextAttributesInRange(offset, self._endOffset - 1)\r\n\t\tfield = textInfos.FormatField()\r\n\t\tfield[\"font-family\"] = attribs.fontFamily\r\n\t\t# Translators: Abbreviation for points, a measurement of font size.\r\n\t\tfield[\"font-size\"] = pgettext(\"font size\", \"%s pt\") % str(attribs.fontSize)\r\n\t\tfield[\"bold\"] = bool(attribs.bold)\r\n\t\tfield[\"italic\"] = bool(attribs.italic)\r\n\t\tfield[\"strikethrough\"] = bool(attribs.strikethrough)\r\n\t\tfield[\"underline\"] = bool(attribs.underline)\r\n\t\tif attribs.superscript:\r\n\t\t\tfield[\"text-position\"] = TextPosition.SUPERSCRIPT\r\n\t\telif attribs.subscript:\r\n\t\t\tfield[\"text-position\"] = TextPosition.SUBSCRIPT\r\n\t\telse:\r\n\t\t\tfield[\"text-position\"] = TextPosition.BASELINE\r\n\t\t# TODO: Not sure how to interpret Java's alignment numbers.\r\n\t\treturn field, (offset, offset + length)\r\n\r\n\tdef getEmbeddedObject(self, offset=0):\r\n\t\toffset += self._startOffset\r\n\r\n\t\t# We need to count the embedded objects to determine which child to use.\r\n\t\t# This could possibly be optimised by caching.\r\n\t\ttext = self._getTextRange(0, offset + 1)\r\n\t\tchildIndex = text.count(textUtils.OBJ_REPLACEMENT_CHAR) - 1\r\n\t\tjabContext=self.obj.jabContext.getAccessibleChildFromContext(childIndex)\r\n\t\tif jabContext:\r\n\t\t\treturn JAB(jabContext=jabContext)\r\n\r\n\t\traise LookupError\r\n\r\nclass JAB(Window):\r\n\r\n\tdef findOverlayClasses(self,clsList):\r\n\t\trole = self.JABRole\r\n\t\tif self._JABAccContextInfo.accessibleText and role in (\"text\",\"password text\",\"edit bar\",\"view port\",\"paragraph\"):\r\n\t\t\tclsList.append(EditableTextWithoutAutoSelectDetection)\r\n\t\telif role in (\"dialog\", \"alert\"):\r\n\t\t\tclsList.append(Dialog)\r\n\t\telif role==\"combo box\":\r\n\t\t\tclsList.append(ComboBox)\r\n\t\telif role==\"table\":\r\n\t\t\tclsList.append(Table)\r\n\t\telif self.parent and isinstance(self.parent,Table) and self.parent._jabTableInfo:\r\n\t\t\tclsList.append(TableCell)\r\n\t\telif role == \"progress bar\":\r\n\t\t\tclsList.append(ProgressBar)\r\n\r\n\t\tclsList.append(JAB)\r\n\r\n\t@classmethod\r\n\tdef kwargsFromSuper(cls,kwargs,relation=None):\r\n\t\tjabContext=None\r\n\t\twindowHandle=kwargs['windowHandle']\r\n\t\tif relation==\"focus\":\r\n\t\t\tvmID=ctypes.c_int()\r\n\t\t\taccContext=JABHandler.JOBJECT64()\r\n\t\t\tJABHandler.bridgeDll.getAccessibleContextWithFocus(windowHandle,ctypes.byref(vmID),ctypes.byref(accContext))\r\n\t\t\tjabContext=JABHandler.JABContext(hwnd=windowHandle,vmID=vmID.value,accContext=accContext.value)\r\n\t\telif isinstance(relation,tuple):\r\n\t\t\tjabContext=JABHandler.JABContext(hwnd=windowHandle)\r\n\t\t\tif jabContext:\r\n\t\t\t\tjabContext=jabContext.getAccessibleContextAt(*relation)\r\n\t\telse:\r\n\t\t\tjabContext=JABHandler.JABContext(hwnd=windowHandle)\r\n\t\tif not jabContext:\r\n\t\t\treturn False\r\n\t\tkwargs['jabContext']=jabContext\r\n\t\treturn True\r\n\r\n\tdef 
__init__(self,relation=None,windowHandle=None,jabContext=None):\r\n\t\tif not windowHandle:\r\n\t\t\twindowHandle=jabContext.hwnd\r\n\t\tself.windowHandle=windowHandle\r\n\t\tself.jabContext=jabContext\r\n\t\tsuper(JAB,self).__init__(windowHandle=windowHandle)\r\n\t\ttry:\r\n\t\t\tself._JABAccContextInfo\r\n\t\texcept RuntimeError:\r\n\t\t\traise InvalidNVDAObject(\"Could not get accessible context info\")\r\n\r\n\tdef _get__JABAccContextInfo(self):\r\n\t\treturn self.jabContext.getAccessibleContextInfo()\r\n\r\n\tdef _get_TextInfo(self):\r\n\t\tif self._JABAccContextInfo.accessibleText and self.role not in [controlTypes.Role.BUTTON,controlTypes.Role.MENUITEM,controlTypes.Role.MENU,controlTypes.Role.LISTITEM]:\r\n\t\t\treturn JABTextInfo\r\n\t\treturn super(JAB,self).TextInfo\r\n\r\n\tdef _isEqual(self,other):\r\n\t\ttry:\r\n\t\t\treturn self.jabContext==other.jabContext\r\n\t\texcept:\r\n\t\t\treturn False\r\n\r\n\tdef _get_keyboardShortcut(self):\r\n\t\tbindings=self.jabContext.getAccessibleKeyBindings()\r\n\t\tif not bindings or bindings.keyBindingsCount<1: \r\n\t\t\treturn None\r\n\t\tshortcutsList=[]\r\n\t\tfor index in range(bindings.keyBindingsCount):\r\n\t\t\tbinding=bindings.keyBindingInfo[index]\r\n\t\t\t# We don't support these modifiers\r\n\t\t\tif binding.modifiers & (\r\n\t\t\t\tJABHandler.AccessibleKeystroke.META\r\n\t\t\t\t| JABHandler.AccessibleKeystroke.ALT_GRAPH\r\n\t\t\t\t| JABHandler.AccessibleKeystroke.BUTTON1\r\n\t\t\t\t| JABHandler.AccessibleKeystroke.BUTTON2\r\n\t\t\t\t| JABHandler.AccessibleKeystroke.BUTTON3\r\n\t\t\t):\r\n\t\t\t\tcontinue\r\n\t\t\tmodifiers = binding.modifiers\r\n\t\t\t# We assume alt if there are no modifiers at all and its not a menu item as this is clearly a nmonic\r\n\t\t\tif not modifiers and self.role != controlTypes.Role.MENUITEM:\r\n\t\t\t\tmodifiers |= JABHandler.AccessibleKeystroke.ALT\r\n\t\t\tkeyList = [\r\n\t\t\t\tkeyLabels.localizedKeyLabels.get(l, l)\r\n\t\t\t\tfor l in JABHandler._getKeyLabels(modifiers, binding.character)\r\n\t\t\t]\r\n\t\t\tshortcutsList.append(\"+\".join(keyList))\r\n\t\treturn \", \".join(shortcutsList)\r\n\r\n\tdef _get_name(self):\r\n\t\tname = self._JABAccContextInfo.name\r\n\t\treturn _processHtml(name)\r\n\r\n\tdef _get_JABRole(self):\r\n\t\treturn self._JABAccContextInfo.role_en_US\r\n\r\n\tdef _get_role(self):\r\n\t\trole = JABRolesToNVDARoles.get(self.JABRole,controlTypes.Role.UNKNOWN)\r\n\t\tif role in ( controlTypes.Role.LABEL, controlTypes.Role.PANEL) and self.parent:\r\n\t\t\tparentRole = self.parent.role\r\n\t\t\tif parentRole == controlTypes.Role.LIST:\r\n\t\t\t\treturn controlTypes.Role.LISTITEM\r\n\t\t\telif parentRole in (controlTypes.Role.TREEVIEW, controlTypes.Role.TREEVIEWITEM):\r\n\t\t\t\treturn controlTypes.Role.TREEVIEWITEM\r\n\t\tif role==controlTypes.Role.LABEL:\r\n\t\t\treturn controlTypes.Role.STATICTEXT\r\n\t\treturn role\r\n\r\n\tdef _get_JABStates(self):\r\n\t\treturn self._JABAccContextInfo.states_en_US\r\n\r\n\tdef _get_states(self):\r\n\t\tlog.debug(\"states: %s\"%self.JABStates)\r\n\t\tstateSet=set()\r\n\t\tstateString=self.JABStates\r\n\t\tstateStrings=stateString.split(',')\r\n\t\tfor state in stateStrings:\r\n\t\t\tif state in JABStatesToNVDAStates:\r\n\t\t\t\tstateSet.add(JABStatesToNVDAStates[state])\r\n\t\tif self.role is controlTypes.Role.TOGGLEBUTTON and controlTypes.State.CHECKED in stateSet:\r\n\t\t\tstateSet.discard(controlTypes.State.CHECKED)\r\n\t\t\tstateSet.add(controlTypes.State.PRESSED)\r\n\t\tif \"editable\" not in stateStrings and 
self._JABAccContextInfo.accessibleText:\r\n\t\t\tstateSet.add(controlTypes.State.READONLY)\r\n\t\tif \"visible\" not in stateStrings:\r\n\t\t\tstateSet.add(controlTypes.State.INVISIBLE)\r\n\t\tif \"showing\" not in stateStrings:\r\n\t\t\tstateSet.add(controlTypes.State.OFFSCREEN)\r\n\t\tif \"expandable\" not in stateStrings:\r\n\t\t\tstateSet.discard(controlTypes.State.COLLAPSED)\r\n\t\tif \"enabled\" not in stateStrings:\r\n\t\t\tstateSet.add(controlTypes.State.UNAVAILABLE)\r\n\t\treturn stateSet\r\n\r\n\tdef _get_value(self):\r\n\t\tif (\r\n\t\t\tself.role not in [\r\n\t\t\t\tcontrolTypes.Role.TOGGLEBUTTON, controlTypes.Role.CHECKBOX,\r\n\t\t\t\tcontrolTypes.Role.MENU, controlTypes.Role.MENUITEM,\r\n\t\t\t\tcontrolTypes.Role.RADIOBUTTON, controlTypes.Role.BUTTON\r\n\t\t\t]\r\n\t\t\tand self._JABAccContextInfo.accessibleValue\r\n\t\t\tand not self._JABAccContextInfo.accessibleText\r\n\t\t):\r\n\t\t\treturn self.jabContext.getCurrentAccessibleValueFromContext()\r\n\r\n\tdef _get_description(self):\r\n\t\tdescription = self._JABAccContextInfo.description\r\n\t\treturn _processHtml(description)\r\n\r\n\tdef _get_location(self):\r\n\t\treturn RectLTWH(self._JABAccContextInfo.x,self._JABAccContextInfo.y,self._JABAccContextInfo.width,self._JABAccContextInfo.height)\r\n\r\n\tdef _get_hasFocus(self):\r\n\t\tif controlTypes.State.FOCUSED in self.states:\r\n\t\t\treturn True\r\n\t\telse:\r\n\t\t\treturn False\r\n\r\n\tdef _get_positionInfo(self):\r\n\t\tinfo=super(JAB,self).positionInfo or {}\r\n\r\n\t\t# If tree view item, try to retrieve the level via JAB\r\n\t\tif self.role==controlTypes.Role.TREEVIEWITEM:\r\n\t\t\ttry:\r\n\t\t\t\ttree=self.jabContext.getAccessibleParentWithRole(\"tree\")\r\n\t\t\t\tif tree:\r\n\t\t\t\t\ttreeDepth=tree.getObjectDepth()\r\n\t\t\t\t\tselfDepth=self.jabContext.getObjectDepth()\r\n\t\t\t\t\tif selfDepth > treeDepth:\r\n\t\t\t\t\t\tinfo['level']=selfDepth-treeDepth\r\n\t\t\texcept:\r\n\t\t\t\tpass\r\n\r\n\t\ttargets=self._getJABRelationTargets('memberOf')\r\n\t\tfor index,target in enumerate(targets):\r\n\t\t\tif target==self.jabContext:\r\n\t\t\t\tinfo['indexInGroup']=index+1\r\n\t\t\t\tinfo['similarItemsInGroup']=len(targets)\r\n\t\t\t\treturn info\r\n\r\n\t\tparent=self.parent\r\n\t\tif (\r\n\t\t\tisinstance(parent, JAB)\r\n\t\t\tand self.role in (\r\n\t\t\t\tcontrolTypes.Role.TREEVIEWITEM,\r\n\t\t\t\tcontrolTypes.Role.LISTITEM,\r\n\t\t\t\tcontrolTypes.Role.TAB\r\n\t\t\t)\r\n\t\t):\r\n\t\t\tindex=self._JABAccContextInfo.indexInParent+1\r\n\t\t\tchildCount=parent._JABAccContextInfo.childrenCount\r\n\t\t\tinfo['indexInGroup']=index\r\n\t\t\tinfo['similarItemsInGroup']=childCount\r\n\t\treturn info\r\n\r\n\tdef _get_activeChild(self):\r\n\t\tjabContext=self.jabContext.getActiveDescendent()\r\n\t\tif jabContext:\r\n\t\t\treturn JAB(jabContext=jabContext)\r\n\t\telse:\r\n\t\t\treturn None\r\n\r\n\tdef _get_parent(self):\r\n\t\tif not hasattr(self,'_parent'):\r\n\t\t\tjabContext=self.jabContext.getAccessibleParentFromContext()\r\n\t\t\tif jabContext and self.indexInParent is not None:\r\n\t\t\t\tself._parent=JAB(jabContext=jabContext)\r\n\t\t\telse:\r\n\t\t\t\tself._parent=super(JAB,self).parent\r\n\t\treturn self._parent\r\n \r\n\tdef _get_next(self):\r\n\t\tparent=self.parent\r\n\t\tif not isinstance(parent,JAB):\r\n\t\t\treturn super(JAB,self).next\r\n\t\tif self.indexInParent is None:\r\n\t\t\treturn None\r\n\t\tnewIndex=self.indexInParent+1\r\n\t\tif newIndex>=parent._JABAccContextInfo.childrenCount:\r\n\t\t\treturn 
None\r\n\t\tjabContext=parent.jabContext.getAccessibleChildFromContext(newIndex)\r\n\t\tif not jabContext:\r\n\t\t\treturn None\r\n\t\tobj=JAB(jabContext=jabContext)\r\n\t\tif not isinstance(obj.parent,JAB):\r\n\t\t\tobj.parent=parent\r\n\t\tif obj.indexInParent is None:\r\n\t\t\tobj.indexInParent=newIndex\r\n\t\telif obj.indexInParent<=self.indexInParent: \r\n\t\t\treturn None\r\n\t\treturn obj\r\n\r\n\tdef _get_previous(self):\r\n\t\tparent=self.parent\r\n\t\tif not isinstance(parent,JAB):\r\n\t\t\treturn super(JAB,self).previous\r\n\t\tif self.indexInParent is None:\r\n\t\t\treturn None\r\n\t\tnewIndex=self.indexInParent-1\r\n\t\tif newIndex<0:\r\n\t\t\treturn None\r\n\t\tjabContext=parent.jabContext.getAccessibleChildFromContext(newIndex)\r\n\t\tif not jabContext:\r\n\t\t\treturn None\r\n\t\tobj=JAB(jabContext=jabContext)\r\n\t\tif not isinstance(obj.parent,JAB):\r\n\t\t\tobj.parent=parent\r\n\t\tif obj.indexInParent is None:\r\n\t\t\tobj.indexInParent=newIndex\r\n\t\telif obj.indexInParent>=self.indexInParent: \r\n\t\t\treturn None\r\n\t\treturn obj\r\n\r\n\tdef _get_firstChild(self):\r\n\t\tif self._JABAccContextInfo.childrenCount<=0:\r\n\t\t\treturn None\r\n\t\tjabContext=self.jabContext.getAccessibleChildFromContext(0)\r\n\t\tif jabContext:\r\n\t\t\tobj=JAB(jabContext=jabContext)\r\n\t\t\tif not isinstance(obj.parent,JAB):\r\n\t\t\t\tobj.parent=self\r\n\t\t\tif obj.indexInParent is None:\r\n\t\t\t\tobj.indexInParent=0\r\n\t\t\treturn obj\r\n\t\telse:\r\n\t\t\treturn None\r\n\r\n\tdef _get_lastChild(self):\r\n\t\tif self._JABAccContextInfo.childrenCount<=0:\r\n\t\t\treturn None\r\n\t\tjabContext=self.jabContext.getAccessibleChildFromContext(self.childCount-1)\r\n\t\tif jabContext:\r\n\t\t\tobj=JAB(jabContext=jabContext)\r\n\t\t\tif not isinstance(obj.parent,JAB):\r\n\t\t\t\tobj.parent=self\r\n\t\t\tif obj.indexInParent is None:\r\n\t\t\t\tobj.indexInParent=self.childCount-1\r\n\t\t\treturn obj\r\n\t\telse:\r\n\t\t\treturn None\r\n\r\n\tdef _get_childCount(self):\r\n\t\treturn self._JABAccContextInfo.childrenCount\r\n\r\n\tdef _get_children(self):\r\n\t\tchildren=[]\r\n\t\tfor index in range(self._JABAccContextInfo.childrenCount):\r\n\t\t\tjabContext=self.jabContext.getAccessibleChildFromContext(index)\r\n\t\t\tif jabContext:\r\n\t\t\t\tobj=JAB(jabContext=jabContext)\r\n\t\t\t\tif not isinstance(obj.parent,JAB):\r\n\t\t\t\t\tobj.parent=self\r\n\t\t\t\tif obj.indexInParent is None:\r\n\t\t\t\t\tobj.indexInParent=index\r\n\t\t\t\tchildren.append(obj)\r\n\t\treturn children\r\n\r\n\tdef _get_indexInParent(self):\r\n\t\tindex = self._JABAccContextInfo.indexInParent\r\n\t\tif index == -1:\r\n\t\t\treturn None\r\n\t\treturn index\r\n\r\n\tdef _getJABRelationTargets(self, key):\r\n\t\trs = self.jabContext.getAccessibleRelationSet()\r\n\t\ttargets=[]\r\n\t\tfor relation in rs.relations[:rs.relationCount]:\r\n\t\t\tfor target in relation.targets[:relation.targetCount]:\r\n\t\t\t\tif relation.key == key:\r\n\t\t\t\t\ttargets.append(JABHandler.JABContext(self.jabContext.hwnd, self.jabContext.vmID, target))\r\n\t\t\t\telse:\r\n\t\t\t\t\tJABHandler.bridgeDll.releaseJavaObject(self.jabContext.vmID,target)\r\n\t\treturn targets\r\n\r\n\tdef _get_flowsTo(self):\r\n\t\ttargets=self._getJABRelationTargets(\"flowsTo\")\r\n\t\tif targets:\r\n\t\t\treturn targets[0]\r\n\r\n\tdef _get_flowsFrom(self):\r\n\t\ttargets=self._getJABRelationTargets(\"flowsFrom\")\r\n\t\tif targets:\r\n\t\t\treturn targets[0]\r\n\r\n\tdef reportFocus(self):\r\n\t\tparent=self.parent\r\n\t\tif self.role in 
[controlTypes.Role.LIST] and isinstance(parent,JAB) and parent.role==controlTypes.Role.COMBOBOX:\r\n\t\t\treturn\r\n\t\tsuper(JAB,self).reportFocus()\r\n\r\n\tdef _get__actions(self):\r\n\t\tactions = JABHandler.AccessibleActions()\r\n\t\tJABHandler.bridgeDll.getAccessibleActions(self.jabContext.vmID, self.jabContext.accContext, actions)\r\n\t\treturn actions.actionInfo[:actions.actionsCount]\r\n\r\n\tdef _get_actionCount(self):\r\n\t\treturn len(self._actions)\r\n\r\n\tdef getActionName(self, index=None):\r\n\t\tif index is None:\r\n\t\t\tindex = self.defaultActionIndex\r\n\t\ttry:\r\n\t\t\treturn self._actions[index].name\r\n\t\texcept IndexError:\r\n\t\t\traise NotImplementedError\r\n\r\n\tdef doAction(self, index=None):\r\n\t\tif index is None:\r\n\t\t\tindex = self.defaultActionIndex\r\n\t\ttry:\r\n\t\t\tJABHandler.bridgeDll.doAccessibleActions(self.jabContext.vmID, self.jabContext.accContext,\r\n\t\t\t\tJABHandler.AccessibleActionsToDo(actionsCount=1, actions=(self._actions[index],)),\r\n\t\t\t\tJABHandler.jint())\r\n\t\texcept (IndexError, RuntimeError):\r\n\t\t\traise NotImplementedError\r\n\r\n\tdef _get_activeDescendant(self):\r\n\t\tdescendantFound=False\r\n\t\tjabContext=self.jabContext\r\n\t\twhile jabContext:\r\n\t\t\ttry:\r\n\t\t\t\ttempContext=jabContext.getActiveDescendent()\r\n\t\t\texcept:\r\n\t\t\t\tbreak\r\n\t\t\tif not tempContext:\r\n\t\t\t\tbreak\r\n\t\t\ttry:\r\n\t\t\t\tdepth=tempContext.getObjectDepth()\r\n\t\t\texcept:\r\n\t\t\t\tdepth=-1\r\n\t\t\tif depth<=0 or tempContext==jabContext: \r\n\t\t\t\tbreak\r\n\t\t\tjabContext=tempContext\r\n\t\t\tdescendantFound=True\r\n\t\tif descendantFound:\r\n\t\t\treturn JAB(jabContext=jabContext)\r\n\r\n\tdef event_gainFocus(self):\r\n\t\tif eventHandler.isPendingEvents(\"gainFocus\"):\r\n\t\t\treturn\r\n\t\tsuper(JAB,self).event_gainFocus()\r\n\t\tif eventHandler.isPendingEvents(\"gainFocus\"):\r\n\t\t\treturn\r\n\t\tactiveDescendant=self.activeDescendant\r\n\t\tif activeDescendant:\r\n\t\t\teventHandler.queueEvent(\"gainFocus\",activeDescendant)\r\n\r\n\r\nclass ComboBox(JAB):\r\n\r\n\tdef _get_states(self):\r\n\t\tstates=super(ComboBox,self).states\r\n\t\tif controlTypes.State.COLLAPSED not in states and controlTypes.State.EXPANDED not in states:\r\n\t\t\tif self.childCount==1 and self.firstChild and self.firstChild.role==controlTypes.Role.POPUPMENU:\r\n\t\t\t\tif controlTypes.State.INVISIBLE in self.firstChild.states:\r\n\t\t\t\t\tstates.add(controlTypes.State.COLLAPSED)\r\n\t\t\t\telse:\r\n\t\t\t\t\tstates.add(controlTypes.State.EXPANDED)\r\n\t\treturn states\r\n\r\n\tdef _get_activeDescendant(self):\r\n\t\tif controlTypes.State.COLLAPSED in self.states:\r\n\t\t\treturn None\r\n\t\treturn super(ComboBox,self).activeDescendant\r\n\r\n\tdef _get_value(self):\r\n\t\tvalue=super(ComboBox,self).value\r\n\t\tif not value and not self.activeDescendant: \r\n\t\t\tdescendant=super(ComboBox,self).activeDescendant\r\n\t\t\tif descendant:\r\n\t\t\t\tvalue=descendant.name\r\n\t\treturn value\r\n\r\nclass Table(JAB):\r\n\r\n\tdef _get__jabTableInfo(self):\r\n\t\tinfo=self.jabContext.getAccessibleTableInfo()\r\n\t\tif info:\r\n\t\t\tself._jabTableInfo=info\r\n\t\t\treturn info\r\n\r\n\tdef _get_rowCount(self):\r\n\t\tif self._jabTableInfo:\r\n\t\t\treturn self._jabTableInfo.rowCount\r\n\r\n\tdef _get_columnCount(self):\r\n\t\tif self._jabTableInfo:\r\n\t\t\treturn self._jabTableInfo.columnCount\r\n\r\n\tdef _get_tableID(self):\r\n\t\treturn self._jabTableInfo.jabTable.accContext.value\r\n\r\nclass 
TableCell(JAB):\r\n\r\n\trole=controlTypes.Role.TABLECELL\r\n\r\n\tdef _get_table(self):\r\n\t\tif self.parent and isinstance(self.parent,Table):\r\n\t\t\tself.table=self.parent\r\n\t\t\treturn self.table\r\n\r\n\tdef _get_tableID(self):\r\n\t\treturn self.table.tableID\r\n\r\n\tdef _get_rowNumber(self):\r\n\t\treturn self.table._jabTableInfo.jabTable.getAccessibleTableRow(self.indexInParent)+1\r\n\r\n\tdef _get_columnNumber(self):\r\n\t\treturn self.table._jabTableInfo.jabTable.getAccessibleTableColumn(self.indexInParent)+1\r\n\r\n\tdef _get_rowHeaderText(self):\r\n\t\theaderTableInfo=self.table.jabContext.getAccessibleTableRowHeader()\r\n\t\tif headerTableInfo and headerTableInfo.jabTable:\r\n\t\t\ttextList=[]\r\n\t\t\trow=self.rowNumber-1\r\n\t\t\tfor col in range(headerTableInfo.columnCount):\r\n\t\t\t\tcellInfo=headerTableInfo.jabTable.getAccessibleTableCellInfo(row,col)\r\n\t\t\t\tif cellInfo and cellInfo.jabContext:\r\n\t\t\t\t\tobj=JAB(jabContext=cellInfo.jabContext)\r\n\t\t\t\t\tif obj.name: textList.append(obj.name)\r\n\t\t\t\t\tif obj.description: textList.append(obj.description)\r\n\t\t\tjabContext=self.table._jabTableInfo.jabTable.getAccessibleTableRowDescription(row)\r\n\t\t\tif jabContext:\r\n\t\t\t\tobj=JAB(jabContext=jabContext)\r\n\t\t\t\tif obj.name: textList.append(obj.name)\r\n\t\t\t\tif obj.description: textList.append(obj.description)\r\n\t\t\treturn \" \".join(textList)\r\n\r\n\tdef _get_columnHeaderText(self):\r\n\t\theaderTableInfo=self.table.jabContext.getAccessibleTableColumnHeader()\r\n\t\tif headerTableInfo and headerTableInfo.jabTable:\r\n\t\t\ttextList=[]\r\n\t\t\tcol=self.columnNumber-1\r\n\t\t\tfor row in range(headerTableInfo.rowCount):\r\n\t\t\t\tcellInfo=headerTableInfo.jabTable.getAccessibleTableCellInfo(row,col)\r\n\t\t\t\tif cellInfo and cellInfo.jabContext:\r\n\t\t\t\t\tobj=JAB(jabContext=cellInfo.jabContext)\r\n\t\t\t\t\tif obj.name: textList.append(obj.name)\r\n\t\t\t\t\tif obj.description: textList.append(obj.description)\r\n\t\t\tjabContext=self.table._jabTableInfo.jabTable.getAccessibleTableColumnDescription(col)\r\n\t\t\tif jabContext:\r\n\t\t\t\tobj=JAB(jabContext=jabContext)\r\n\t\t\t\tif obj.name: textList.append(obj.name)\r\n\t\t\t\tif obj.description: textList.append(obj.description)\r\n\t\t\treturn \" \".join(textList)\r\n",
"path": "source/NVDAObjects/JAB/__init__.py"
}
] | [
{
"content": "# A part of NonVisual Desktop Access (NVDA)\r\n# Copyright (C) 2006-2022 NV Access Limited, Leonard de Ruijter, Joseph Lee, Renaud Paquay, pvagner\r\n# This file is covered by the GNU General Public License.\r\n# See the file COPYING for more details.\r\n\r\nimport ctypes\r\nimport re\r\nfrom typing import (\r\n\tDict,\r\n)\r\nimport eventHandler\r\nimport keyLabels\r\nimport JABHandler\r\nimport controlTypes\r\nimport textUtils\r\nfrom controlTypes import TextPosition\r\nfrom ..window import Window\r\nfrom ..behaviors import ProgressBar, EditableTextWithoutAutoSelectDetection, Dialog\r\nimport textInfos.offsets\r\nfrom logHandler import log\r\nfrom .. import InvalidNVDAObject\r\nfrom locationHelper import RectLTWH\r\n\r\n\r\nJABRolesToNVDARoles: Dict[str, controlTypes.Role] = {\r\n\t\"alert\": controlTypes.Role.DIALOG,\r\n\t\"column header\": controlTypes.Role.TABLECOLUMNHEADER,\r\n\t\"canvas\": controlTypes.Role.CANVAS,\r\n\t\"combo box\": controlTypes.Role.COMBOBOX,\r\n\t\"desktop icon\": controlTypes.Role.DESKTOPICON,\r\n\t\"internal frame\": controlTypes.Role.INTERNALFRAME,\r\n\t\"desktop pane\": controlTypes.Role.DESKTOPPANE,\r\n\t\"option pane\": controlTypes.Role.OPTIONPANE,\r\n\t\"window\": controlTypes.Role.WINDOW,\r\n\t\"frame\": controlTypes.Role.FRAME,\r\n\t\"dialog\": controlTypes.Role.DIALOG,\r\n\t\"color chooser\": controlTypes.Role.COLORCHOOSER,\r\n\t\"directory pane\": controlTypes.Role.DIRECTORYPANE,\r\n\t\"file chooser\": controlTypes.Role.FILECHOOSER,\r\n\t\"filler\": controlTypes.Role.FILLER,\r\n\t\"hyperlink\": controlTypes.Role.LINK,\r\n\t\"icon\": controlTypes.Role.ICON,\r\n\t\"label\": controlTypes.Role.LABEL,\r\n\t\"root pane\": controlTypes.Role.PANEL,\r\n\t\"glass pane\": controlTypes.Role.PANEL,\r\n\t\"layered pane\": controlTypes.Role.PANEL,\r\n\t\"list\": controlTypes.Role.LIST,\r\n\t\"list item\": controlTypes.Role.LISTITEM,\r\n\t\"menu bar\": controlTypes.Role.MENUBAR,\r\n\t\"popup menu\": controlTypes.Role.POPUPMENU,\r\n\t\"menu\": controlTypes.Role.MENU,\r\n\t\"menu item\": controlTypes.Role.MENUITEM,\r\n\t\"separator\": controlTypes.Role.SEPARATOR,\r\n\t\"page tab list\": controlTypes.Role.TABCONTROL,\r\n\t\"page tab\": controlTypes.Role.TAB,\r\n\t\"panel\": controlTypes.Role.PANEL,\r\n\t\"progress bar\": controlTypes.Role.PROGRESSBAR,\r\n\t\"password text\": controlTypes.Role.PASSWORDEDIT,\r\n\t\"push button\": controlTypes.Role.BUTTON,\r\n\t\"toggle button\": controlTypes.Role.TOGGLEBUTTON,\r\n\t\"check box\": controlTypes.Role.CHECKBOX,\r\n\t\"radio button\": controlTypes.Role.RADIOBUTTON,\r\n\t\"row header\": controlTypes.Role.TABLEROWHEADER,\r\n\t\"scroll pane\": controlTypes.Role.SCROLLPANE,\r\n\t\"scroll bar\": controlTypes.Role.SCROLLBAR,\r\n\t\"view port\": controlTypes.Role.VIEWPORT,\r\n\t\"slider\": controlTypes.Role.SLIDER,\r\n\t\"split pane\": controlTypes.Role.SPLITPANE,\r\n\t\"table\": controlTypes.Role.TABLE,\r\n\t\"text\": controlTypes.Role.EDITABLETEXT,\r\n\t\"tree\": controlTypes.Role.TREEVIEW,\r\n\t\"tool bar\": controlTypes.Role.TOOLBAR,\r\n\t\"tool tip\": controlTypes.Role.TOOLTIP,\r\n\t\"status bar\": controlTypes.Role.STATUSBAR,\r\n\t\"statusbar\": controlTypes.Role.STATUSBAR,\r\n\t\"date editor\": controlTypes.Role.DATEEDITOR,\r\n\t\"spin box\": controlTypes.Role.SPINBUTTON,\r\n\t\"font chooser\": controlTypes.Role.FONTCHOOSER,\r\n\t\"group box\": controlTypes.Role.GROUPING,\r\n\t\"groupbox\": controlTypes.Role.GROUPING,\r\n\t\"header\": controlTypes.Role.HEADER,\r\n\t\"footer\": 
controlTypes.Role.FOOTER,\r\n\t\"paragraph\": controlTypes.Role.PARAGRAPH,\r\n\t\"ruler\": controlTypes.Role.RULER,\r\n\t\"edit bar\": controlTypes.Role.EDITBAR,\r\n}\r\n\r\nJABStatesToNVDAStates={\r\n\t\"busy\":controlTypes.State.BUSY,\r\n\t\"checked\":controlTypes.State.CHECKED,\r\n\t\"focused\":controlTypes.State.FOCUSED,\r\n\t\"selected\":controlTypes.State.SELECTED,\r\n\t\"pressed\":controlTypes.State.PRESSED,\r\n\t\"expanded\":controlTypes.State.EXPANDED,\r\n\t\"collapsed\":controlTypes.State.COLLAPSED,\r\n\t\"iconified\":controlTypes.State.ICONIFIED,\r\n\t\"modal\":controlTypes.State.MODAL,\r\n\t\"multi_line\":controlTypes.State.MULTILINE,\r\n\t\"focusable\":controlTypes.State.FOCUSABLE,\r\n\t\"editable\":controlTypes.State.EDITABLE,\r\n\t\"selectable\": controlTypes.State.SELECTABLE,\r\n}\r\n\r\n\r\nre_simpleXmlTag = re.compile(r\"(\\<[^>]+\\>)+\")\r\n\r\n\r\ndef _subHtmlTag(match: re.match) -> str:\r\n\t\"\"\" Determines whether to replace the tag with a space or to just remove it. \"\"\"\r\n\tstartIndex, endIndex = match.span()\r\n\treturn \"\" if (\r\n\t\tstartIndex == 0 or match.string[startIndex - 1].isspace()\r\n\t\tor endIndex == len(match.string) or match.string[endIndex].isspace()\r\n\t) else \" \"\r\n\r\n\r\ndef _processHtml(text: str) -> str:\r\n\t\"\"\" Strips HTML tags from text if it is HTML \"\"\"\r\n\treturn re_simpleXmlTag.sub(_subHtmlTag, text) if text.startswith(\"<html>\") else text\r\n\r\n\r\nclass JABTextInfo(textInfos.offsets.OffsetsTextInfo):\r\n\r\n\tdef _getOffsetFromPoint(self,x,y):\r\n\t\tinfo=self.obj.jabContext.getAccessibleTextInfo(x,y)\r\n\t\toffset=max(min(info.indexAtPoint,info.charCount-1),0)\r\n\t\treturn offset\r\n\r\n\tdef _getBoundingRectFromOffset(self, offset):\r\n\t\trect = self.obj.jabContext.getAccessibleTextRect(offset)\r\n\t\ttry:\r\n\t\t\treturn RectLTWH(rect.x, rect.y, rect.width, rect.height).toLTRB()\r\n\t\texcept ValueError:\r\n\t\t\traise LookupError\r\n\r\n\tdef _getCaretOffset(self):\r\n\t\ttextInfo=self.obj.jabContext.getAccessibleTextInfo(self.obj._JABAccContextInfo.x,self.obj._JABAccContextInfo.y)\r\n\t\toffset=textInfo.caretIndex\r\n\t\t# OpenOffice sometimes returns nonsense, so treat charCount < offset as no caret.\r\n\t\tif offset==-1 or textInfo.charCount<offset:\r\n\t\t\traise RuntimeError(\"no available caret in this object\")\r\n\t\treturn offset\r\n\r\n\tdef _setCaretOffset(self,offset):\r\n\t\tself.obj.jabContext.setCaretPosition(offset)\r\n\r\n\tdef _getSelectionOffsets(self):\r\n\t\tinfo=self.obj.jabContext.getAccessibleTextSelectionInfo()\r\n\t\tstart=max(info.selectionStartIndex,0)\r\n\t\tend=max(info.selectionEndIndex,0)\r\n\t\treturn (start,end)\r\n\r\n\tdef _setSelectionOffsets(self,start,end):\r\n\t\tself.obj.jabContext.selectTextRange(start,end)\r\n\r\n\tdef _getStoryLength(self):\r\n\t\tif not hasattr(self,'_storyLength'):\r\n\t\t\ttextInfo=self.obj.jabContext.getAccessibleTextInfo(self.obj._JABAccContextInfo.x,self.obj._JABAccContextInfo.y)\r\n\t\t\tself._storyLength=textInfo.charCount\r\n\t\treturn self._storyLength\r\n\r\n\tdef _getTextRange(self,start,end):\r\n\t\t#Java needs end of range as last character, not one past the last character\r\n\t\ttext=self.obj.jabContext.getAccessibleTextRange(start,end-1)\r\n\t\treturn text\r\n\r\n\tdef _getLineNumFromOffset(self,offset):\r\n\t\treturn None\r\n\r\n\tdef _getLineOffsets(self,offset):\r\n\t\t(start,end)=self.obj.jabContext.getAccessibleTextLineBounds(offset)\r\n\t\tif end==-1 and offset>0:\r\n\t\t\t# #1892: JAB returns -1 for the end insertion 
position\r\n\t\t\t# instead of returning the offsets for the last line.\r\n\t\t\t# Try one character back.\r\n\t\t\t(start,end)=self.obj.jabContext.getAccessibleTextLineBounds(offset-1)\r\n\t\t#Java gives end as the last character, not one past the last character\r\n\t\tend=end+1\r\n\t\treturn (start,end)\r\n\r\n\tdef _getParagraphOffsets(self,offset):\r\n\t\treturn self._getLineOffsets(offset)\r\n\r\n\tdef _getFormatFieldAndOffsets(self, offset, formatConfig, calculateOffsets=True):\r\n\t\tattribs: JABHandler.AccessibleTextAttributesInfo\r\n\t\tattribs, length = self.obj.jabContext.getTextAttributesInRange(offset, self._endOffset - 1)\r\n\t\tfield = textInfos.FormatField()\r\n\t\tfield[\"font-family\"] = attribs.fontFamily\r\n\t\t# Translators: Abbreviation for points, a measurement of font size.\r\n\t\tfield[\"font-size\"] = pgettext(\"font size\", \"%s pt\") % str(attribs.fontSize)\r\n\t\tfield[\"bold\"] = bool(attribs.bold)\r\n\t\tfield[\"italic\"] = bool(attribs.italic)\r\n\t\tfield[\"strikethrough\"] = bool(attribs.strikethrough)\r\n\t\tfield[\"underline\"] = bool(attribs.underline)\r\n\t\tif attribs.superscript:\r\n\t\t\tfield[\"text-position\"] = TextPosition.SUPERSCRIPT\r\n\t\telif attribs.subscript:\r\n\t\t\tfield[\"text-position\"] = TextPosition.SUBSCRIPT\r\n\t\telse:\r\n\t\t\tfield[\"text-position\"] = TextPosition.BASELINE\r\n\t\t# TODO: Not sure how to interpret Java's alignment numbers.\r\n\t\treturn field, (offset, offset + length)\r\n\r\n\tdef getEmbeddedObject(self, offset=0):\r\n\t\toffset += self._startOffset\r\n\r\n\t\t# We need to count the embedded objects to determine which child to use.\r\n\t\t# This could possibly be optimised by caching.\r\n\t\ttext = self._getTextRange(0, offset + 1)\r\n\t\tchildIndex = text.count(textUtils.OBJ_REPLACEMENT_CHAR) - 1\r\n\t\tjabContext=self.obj.jabContext.getAccessibleChildFromContext(childIndex)\r\n\t\tif jabContext:\r\n\t\t\treturn JAB(jabContext=jabContext)\r\n\r\n\t\traise LookupError\r\n\r\nclass JAB(Window):\r\n\r\n\tdef findOverlayClasses(self,clsList):\r\n\t\trole = self.JABRole\r\n\t\tif self._JABAccContextInfo.accessibleText and role in (\"text\",\"password text\",\"edit bar\",\"view port\",\"paragraph\"):\r\n\t\t\tclsList.append(EditableTextWithoutAutoSelectDetection)\r\n\t\telif role in (\"dialog\", \"alert\"):\r\n\t\t\tclsList.append(Dialog)\r\n\t\telif role==\"combo box\":\r\n\t\t\tclsList.append(ComboBox)\r\n\t\telif role==\"table\":\r\n\t\t\tclsList.append(Table)\r\n\t\telif self.parent and isinstance(self.parent,Table) and self.parent._jabTableInfo:\r\n\t\t\tclsList.append(TableCell)\r\n\t\telif role == \"progress bar\":\r\n\t\t\tclsList.append(ProgressBar)\r\n\r\n\t\tclsList.append(JAB)\r\n\r\n\t@classmethod\r\n\tdef kwargsFromSuper(cls,kwargs,relation=None):\r\n\t\tjabContext=None\r\n\t\twindowHandle=kwargs['windowHandle']\r\n\t\tif relation==\"focus\":\r\n\t\t\tvmID=ctypes.c_int()\r\n\t\t\taccContext=JABHandler.JOBJECT64()\r\n\t\t\tJABHandler.bridgeDll.getAccessibleContextWithFocus(windowHandle,ctypes.byref(vmID),ctypes.byref(accContext))\r\n\t\t\tjabContext=JABHandler.JABContext(hwnd=windowHandle,vmID=vmID.value,accContext=accContext.value)\r\n\t\telif isinstance(relation,tuple):\r\n\t\t\tjabContext=JABHandler.JABContext(hwnd=windowHandle)\r\n\t\t\tif jabContext:\r\n\t\t\t\tjabContext=jabContext.getAccessibleContextAt(*relation)\r\n\t\telse:\r\n\t\t\tjabContext=JABHandler.JABContext(hwnd=windowHandle)\r\n\t\tif not jabContext:\r\n\t\t\treturn False\r\n\t\tkwargs['jabContext']=jabContext\r\n\t\treturn 
True\r\n\r\n\tdef __init__(self,relation=None,windowHandle=None,jabContext=None):\r\n\t\tif not windowHandle:\r\n\t\t\twindowHandle=jabContext.hwnd\r\n\t\tself.windowHandle=windowHandle\r\n\t\tself.jabContext=jabContext\r\n\t\tsuper(JAB,self).__init__(windowHandle=windowHandle)\r\n\t\ttry:\r\n\t\t\tself._JABAccContextInfo\r\n\t\texcept RuntimeError:\r\n\t\t\traise InvalidNVDAObject(\"Could not get accessible context info\")\r\n\r\n\tdef _get__JABAccContextInfo(self):\r\n\t\treturn self.jabContext.getAccessibleContextInfo()\r\n\r\n\tdef _get_TextInfo(self):\r\n\t\tif self._JABAccContextInfo.accessibleText and self.role not in [controlTypes.Role.BUTTON,controlTypes.Role.MENUITEM,controlTypes.Role.MENU,controlTypes.Role.LISTITEM]:\r\n\t\t\treturn JABTextInfo\r\n\t\treturn super(JAB,self).TextInfo\r\n\r\n\tdef _isEqual(self,other):\r\n\t\ttry:\r\n\t\t\treturn self.jabContext==other.jabContext\r\n\t\texcept:\r\n\t\t\treturn False\r\n\r\n\tdef _get_keyboardShortcut(self):\r\n\t\tbindings=self.jabContext.getAccessibleKeyBindings()\r\n\t\tif not bindings or bindings.keyBindingsCount<1: \r\n\t\t\treturn None\r\n\t\tshortcutsList=[]\r\n\t\tfor index in range(bindings.keyBindingsCount):\r\n\t\t\tbinding=bindings.keyBindingInfo[index]\r\n\t\t\t# We don't support these modifiers\r\n\t\t\tif binding.modifiers & (\r\n\t\t\t\tJABHandler.AccessibleKeystroke.META\r\n\t\t\t\t| JABHandler.AccessibleKeystroke.ALT_GRAPH\r\n\t\t\t\t| JABHandler.AccessibleKeystroke.BUTTON1\r\n\t\t\t\t| JABHandler.AccessibleKeystroke.BUTTON2\r\n\t\t\t\t| JABHandler.AccessibleKeystroke.BUTTON3\r\n\t\t\t):\r\n\t\t\t\tcontinue\r\n\t\t\tmodifiers = binding.modifiers\r\n\t\t\t# We assume alt if there are no modifiers at all and its not a menu item as this is clearly a nmonic\r\n\t\t\tif not modifiers and self.role != controlTypes.Role.MENUITEM:\r\n\t\t\t\tmodifiers |= JABHandler.AccessibleKeystroke.ALT\r\n\t\t\tkeyList = [\r\n\t\t\t\tkeyLabels.localizedKeyLabels.get(l, l)\r\n\t\t\t\tfor l in JABHandler._getKeyLabels(modifiers, binding.character)\r\n\t\t\t]\r\n\t\t\tshortcutsList.append(\"+\".join(keyList))\r\n\t\treturn \", \".join(shortcutsList)\r\n\r\n\tdef _get_name(self):\r\n\t\tname = self._JABAccContextInfo.name\r\n\t\treturn _processHtml(name)\r\n\r\n\tdef _get_JABRole(self):\r\n\t\treturn self._JABAccContextInfo.role_en_US\r\n\r\n\tdef _get_role(self):\r\n\t\trole = JABRolesToNVDARoles.get(self.JABRole,controlTypes.Role.UNKNOWN)\r\n\t\tif role in ( controlTypes.Role.LABEL, controlTypes.Role.PANEL) and self.parent:\r\n\t\t\tparentRole = self.parent.role\r\n\t\t\tif parentRole == controlTypes.Role.LIST:\r\n\t\t\t\treturn controlTypes.Role.LISTITEM\r\n\t\t\telif parentRole in (controlTypes.Role.TREEVIEW, controlTypes.Role.TREEVIEWITEM):\r\n\t\t\t\treturn controlTypes.Role.TREEVIEWITEM\r\n\t\tif role==controlTypes.Role.LABEL:\r\n\t\t\treturn controlTypes.Role.STATICTEXT\r\n\t\treturn role\r\n\r\n\tdef _get_JABStates(self):\r\n\t\treturn self._JABAccContextInfo.states_en_US\r\n\r\n\tdef _get_states(self):\r\n\t\tlog.debug(\"states: %s\"%self.JABStates)\r\n\t\tstateSet=set()\r\n\t\tstateString=self.JABStates\r\n\t\tstateStrings=stateString.split(',')\r\n\t\tfor state in stateStrings:\r\n\t\t\tif state in JABStatesToNVDAStates:\r\n\t\t\t\tstateSet.add(JABStatesToNVDAStates[state])\r\n\t\tif self.role is controlTypes.Role.TOGGLEBUTTON and controlTypes.State.CHECKED in stateSet:\r\n\t\t\tstateSet.discard(controlTypes.State.CHECKED)\r\n\t\t\tstateSet.add(controlTypes.State.PRESSED)\r\n\t\tif \"editable\" not in stateStrings and 
self._JABAccContextInfo.accessibleText:\r\n\t\t\tstateSet.add(controlTypes.State.READONLY)\r\n\t\tif \"visible\" not in stateStrings:\r\n\t\t\tstateSet.add(controlTypes.State.INVISIBLE)\r\n\t\tif \"showing\" not in stateStrings:\r\n\t\t\tstateSet.add(controlTypes.State.OFFSCREEN)\r\n\t\tif \"expandable\" not in stateStrings:\r\n\t\t\tstateSet.discard(controlTypes.State.COLLAPSED)\r\n\t\tif \"enabled\" not in stateStrings:\r\n\t\t\tstateSet.add(controlTypes.State.UNAVAILABLE)\r\n\t\treturn stateSet\r\n\r\n\tdef _get_value(self):\r\n\t\tif (\r\n\t\t\tself.role not in [\r\n\t\t\t\tcontrolTypes.Role.TOGGLEBUTTON, controlTypes.Role.CHECKBOX,\r\n\t\t\t\tcontrolTypes.Role.MENU, controlTypes.Role.MENUITEM,\r\n\t\t\t\tcontrolTypes.Role.RADIOBUTTON, controlTypes.Role.BUTTON\r\n\t\t\t]\r\n\t\t\tand self._JABAccContextInfo.accessibleValue\r\n\t\t\tand not self._JABAccContextInfo.accessibleText\r\n\t\t):\r\n\t\t\treturn self.jabContext.getCurrentAccessibleValueFromContext()\r\n\r\n\tdef _get_description(self):\r\n\t\tdescription = self._JABAccContextInfo.description\r\n\t\treturn _processHtml(description)\r\n\r\n\tdef _get_location(self):\r\n\t\treturn RectLTWH(self._JABAccContextInfo.x,self._JABAccContextInfo.y,self._JABAccContextInfo.width,self._JABAccContextInfo.height)\r\n\r\n\tdef _get_hasFocus(self):\r\n\t\tif controlTypes.State.FOCUSED in self.states:\r\n\t\t\treturn True\r\n\t\telse:\r\n\t\t\treturn False\r\n\r\n\tdef _get_positionInfo(self):\r\n\t\tinfo=super(JAB,self).positionInfo or {}\r\n\r\n\t\t# If tree view item, try to retrieve the level via JAB\r\n\t\tif self.role==controlTypes.Role.TREEVIEWITEM:\r\n\t\t\ttry:\r\n\t\t\t\ttree=self.jabContext.getAccessibleParentWithRole(\"tree\")\r\n\t\t\t\tif tree:\r\n\t\t\t\t\ttreeDepth=tree.getObjectDepth()\r\n\t\t\t\t\tselfDepth=self.jabContext.getObjectDepth()\r\n\t\t\t\t\tif selfDepth > treeDepth:\r\n\t\t\t\t\t\tinfo['level']=selfDepth-treeDepth\r\n\t\t\texcept:\r\n\t\t\t\tpass\r\n\r\n\t\ttargets=self._getJABRelationTargets('memberOf')\r\n\t\tfor index,target in enumerate(targets):\r\n\t\t\tif target==self.jabContext:\r\n\t\t\t\tinfo['indexInGroup']=index+1\r\n\t\t\t\tinfo['similarItemsInGroup']=len(targets)\r\n\t\t\t\treturn info\r\n\r\n\t\tparent=self.parent\r\n\t\tif (\r\n\t\t\tisinstance(parent, JAB)\r\n\t\t\tand self.role in (\r\n\t\t\t\tcontrolTypes.Role.TREEVIEWITEM,\r\n\t\t\t\tcontrolTypes.Role.LISTITEM,\r\n\t\t\t\tcontrolTypes.Role.TAB\r\n\t\t\t)\r\n\t\t):\r\n\t\t\tindex=self._JABAccContextInfo.indexInParent+1\r\n\t\t\tchildCount=parent._JABAccContextInfo.childrenCount\r\n\t\t\tinfo['indexInGroup']=index\r\n\t\t\tinfo['similarItemsInGroup']=childCount\r\n\t\treturn info\r\n\r\n\tdef _get_activeChild(self):\r\n\t\tjabContext=self.jabContext.getActiveDescendent()\r\n\t\tif jabContext:\r\n\t\t\treturn JAB(jabContext=jabContext)\r\n\t\telse:\r\n\t\t\treturn None\r\n\r\n\tdef _get_parent(self):\r\n\t\tif not hasattr(self,'_parent'):\r\n\t\t\tjabContext=self.jabContext.getAccessibleParentFromContext()\r\n\t\t\tif jabContext and self.indexInParent is not None:\r\n\t\t\t\tself._parent=JAB(jabContext=jabContext)\r\n\t\t\telse:\r\n\t\t\t\tself._parent=super(JAB,self).parent\r\n\t\treturn self._parent\r\n \r\n\tdef _get_next(self):\r\n\t\tparent=self.parent\r\n\t\tif not isinstance(parent,JAB):\r\n\t\t\treturn super(JAB,self).next\r\n\t\tif self.indexInParent is None:\r\n\t\t\treturn None\r\n\t\tnewIndex=self.indexInParent+1\r\n\t\tif newIndex>=parent._JABAccContextInfo.childrenCount:\r\n\t\t\treturn 
None\r\n\t\tjabContext=parent.jabContext.getAccessibleChildFromContext(newIndex)\r\n\t\tif not jabContext:\r\n\t\t\treturn None\r\n\t\tobj=JAB(jabContext=jabContext)\r\n\t\tif not isinstance(obj.parent,JAB):\r\n\t\t\tobj.parent=parent\r\n\t\tif obj.indexInParent is None:\r\n\t\t\tobj.indexInParent=newIndex\r\n\t\telif obj.indexInParent<=self.indexInParent: \r\n\t\t\treturn None\r\n\t\treturn obj\r\n\r\n\tdef _get_previous(self):\r\n\t\tparent=self.parent\r\n\t\tif not isinstance(parent,JAB):\r\n\t\t\treturn super(JAB,self).previous\r\n\t\tif self.indexInParent is None:\r\n\t\t\treturn None\r\n\t\tnewIndex=self.indexInParent-1\r\n\t\tif newIndex<0:\r\n\t\t\treturn None\r\n\t\tjabContext=parent.jabContext.getAccessibleChildFromContext(newIndex)\r\n\t\tif not jabContext:\r\n\t\t\treturn None\r\n\t\tobj=JAB(jabContext=jabContext)\r\n\t\tif not isinstance(obj.parent,JAB):\r\n\t\t\tobj.parent=parent\r\n\t\tif obj.indexInParent is None:\r\n\t\t\tobj.indexInParent=newIndex\r\n\t\telif obj.indexInParent>=self.indexInParent: \r\n\t\t\treturn None\r\n\t\treturn obj\r\n\r\n\tdef _get_firstChild(self):\r\n\t\tif self._JABAccContextInfo.childrenCount<=0:\r\n\t\t\treturn None\r\n\t\tjabContext=self.jabContext.getAccessibleChildFromContext(0)\r\n\t\tif jabContext:\r\n\t\t\tobj=JAB(jabContext=jabContext)\r\n\t\t\tif not isinstance(obj.parent,JAB):\r\n\t\t\t\tobj.parent=self\r\n\t\t\tif obj.indexInParent is None:\r\n\t\t\t\tobj.indexInParent=0\r\n\t\t\treturn obj\r\n\t\telse:\r\n\t\t\treturn None\r\n\r\n\tdef _get_lastChild(self):\r\n\t\tif self._JABAccContextInfo.childrenCount<=0:\r\n\t\t\treturn None\r\n\t\tjabContext=self.jabContext.getAccessibleChildFromContext(self.childCount-1)\r\n\t\tif jabContext:\r\n\t\t\tobj=JAB(jabContext=jabContext)\r\n\t\t\tif not isinstance(obj.parent,JAB):\r\n\t\t\t\tobj.parent=self\r\n\t\t\tif obj.indexInParent is None:\r\n\t\t\t\tobj.indexInParent=self.childCount-1\r\n\t\t\treturn obj\r\n\t\telse:\r\n\t\t\treturn None\r\n\r\n\tdef _get_childCount(self):\r\n\t\treturn self._JABAccContextInfo.childrenCount\r\n\r\n\tdef _get_children(self):\r\n\t\tchildren=[]\r\n\t\tfor index in range(self._JABAccContextInfo.childrenCount):\r\n\t\t\tjabContext=self.jabContext.getAccessibleChildFromContext(index)\r\n\t\t\tif jabContext:\r\n\t\t\t\tobj=JAB(jabContext=jabContext)\r\n\t\t\t\tif not isinstance(obj.parent,JAB):\r\n\t\t\t\t\tobj.parent=self\r\n\t\t\t\tif obj.indexInParent is None:\r\n\t\t\t\t\tobj.indexInParent=index\r\n\t\t\t\tchildren.append(obj)\r\n\t\treturn children\r\n\r\n\tdef _get_indexInParent(self):\r\n\t\tindex = self._JABAccContextInfo.indexInParent\r\n\t\tif index == -1:\r\n\t\t\treturn None\r\n\t\treturn index\r\n\r\n\tdef _getJABRelationTargets(self, key):\r\n\t\trs = self.jabContext.getAccessibleRelationSet()\r\n\t\ttargets=[]\r\n\t\tfor relation in rs.relations[:rs.relationCount]:\r\n\t\t\tfor target in relation.targets[:relation.targetCount]:\r\n\t\t\t\tif relation.key == key:\r\n\t\t\t\t\ttargets.append(JABHandler.JABContext(self.jabContext.hwnd, self.jabContext.vmID, target))\r\n\t\t\t\telse:\r\n\t\t\t\t\tJABHandler.bridgeDll.releaseJavaObject(self.jabContext.vmID,target)\r\n\t\treturn targets\r\n\r\n\tdef _get_flowsTo(self):\r\n\t\ttargets=self._getJABRelationTargets(\"flowsTo\")\r\n\t\tif targets:\r\n\t\t\treturn targets[0]\r\n\r\n\tdef _get_flowsFrom(self):\r\n\t\ttargets=self._getJABRelationTargets(\"flowsFrom\")\r\n\t\tif targets:\r\n\t\t\treturn targets[0]\r\n\r\n\tdef reportFocus(self):\r\n\t\tparent=self.parent\r\n\t\tif self.role in 
[controlTypes.Role.LIST] and isinstance(parent,JAB) and parent.role==controlTypes.Role.COMBOBOX:\r\n\t\t\treturn\r\n\t\tsuper(JAB,self).reportFocus()\r\n\r\n\tdef _get__actions(self):\r\n\t\tactions = JABHandler.AccessibleActions()\r\n\t\tJABHandler.bridgeDll.getAccessibleActions(self.jabContext.vmID, self.jabContext.accContext, actions)\r\n\t\treturn actions.actionInfo[:actions.actionsCount]\r\n\r\n\tdef _get_actionCount(self):\r\n\t\treturn len(self._actions)\r\n\r\n\tdef getActionName(self, index=None):\r\n\t\tif index is None:\r\n\t\t\tindex = self.defaultActionIndex\r\n\t\ttry:\r\n\t\t\treturn self._actions[index].name\r\n\t\texcept IndexError:\r\n\t\t\traise NotImplementedError\r\n\r\n\tdef doAction(self, index=None):\r\n\t\tif index is None:\r\n\t\t\tindex = self.defaultActionIndex\r\n\t\ttry:\r\n\t\t\tJABHandler.bridgeDll.doAccessibleActions(self.jabContext.vmID, self.jabContext.accContext,\r\n\t\t\t\tJABHandler.AccessibleActionsToDo(actionsCount=1, actions=(self._actions[index],)),\r\n\t\t\t\tJABHandler.jint())\r\n\t\texcept (IndexError, RuntimeError):\r\n\t\t\traise NotImplementedError\r\n\r\n\tdef _get_activeDescendant(self):\r\n\t\tdescendantFound=False\r\n\t\tjabContext=self.jabContext\r\n\t\twhile jabContext:\r\n\t\t\ttry:\r\n\t\t\t\ttempContext=jabContext.getActiveDescendent()\r\n\t\t\texcept:\r\n\t\t\t\tbreak\r\n\t\t\tif not tempContext:\r\n\t\t\t\tbreak\r\n\t\t\ttry:\r\n\t\t\t\tdepth=tempContext.getObjectDepth()\r\n\t\t\texcept:\r\n\t\t\t\tdepth=-1\r\n\t\t\tif depth<=0 or tempContext==jabContext: \r\n\t\t\t\tbreak\r\n\t\t\tjabContext=tempContext\r\n\t\t\tdescendantFound=True\r\n\t\tif descendantFound:\r\n\t\t\treturn JAB(jabContext=jabContext)\r\n\r\n\tdef event_gainFocus(self):\r\n\t\tif eventHandler.isPendingEvents(\"gainFocus\"):\r\n\t\t\treturn\r\n\t\tsuper(JAB,self).event_gainFocus()\r\n\t\tif eventHandler.isPendingEvents(\"gainFocus\"):\r\n\t\t\treturn\r\n\t\tactiveDescendant=self.activeDescendant\r\n\t\tif activeDescendant:\r\n\t\t\teventHandler.queueEvent(\"gainFocus\",activeDescendant)\r\n\r\n\r\nclass ComboBox(JAB):\r\n\r\n\tdef _get_states(self):\r\n\t\tstates=super(ComboBox,self).states\r\n\t\tif controlTypes.State.COLLAPSED not in states and controlTypes.State.EXPANDED not in states:\r\n\t\t\tif self.childCount==1 and self.firstChild and self.firstChild.role==controlTypes.Role.POPUPMENU:\r\n\t\t\t\tif controlTypes.State.INVISIBLE in self.firstChild.states:\r\n\t\t\t\t\tstates.add(controlTypes.State.COLLAPSED)\r\n\t\t\t\telse:\r\n\t\t\t\t\tstates.add(controlTypes.State.EXPANDED)\r\n\t\treturn states\r\n\r\n\tdef _get_activeDescendant(self):\r\n\t\tif controlTypes.State.COLLAPSED in self.states:\r\n\t\t\treturn None\r\n\t\treturn super(ComboBox,self).activeDescendant\r\n\r\n\tdef _get_value(self):\r\n\t\tvalue=super(ComboBox,self).value\r\n\t\tif not value and not self.activeDescendant: \r\n\t\t\tdescendant=super(ComboBox,self).activeDescendant\r\n\t\t\tif descendant:\r\n\t\t\t\tvalue=descendant.name\r\n\t\treturn value\r\n\r\nclass Table(JAB):\r\n\r\n\tdef _get__jabTableInfo(self):\r\n\t\tinfo=self.jabContext.getAccessibleTableInfo()\r\n\t\tif info:\r\n\t\t\tself._jabTableInfo=info\r\n\t\t\treturn info\r\n\r\n\tdef _get_rowCount(self):\r\n\t\tif self._jabTableInfo:\r\n\t\t\treturn self._jabTableInfo.rowCount\r\n\r\n\tdef _get_columnCount(self):\r\n\t\tif self._jabTableInfo:\r\n\t\t\treturn self._jabTableInfo.columnCount\r\n\r\n\tdef _get_tableID(self):\r\n\t\treturn self._jabTableInfo.jabTable.accContext.value\r\n\r\nclass 
TableCell(JAB):\r\n\r\n\trole=controlTypes.Role.TABLECELL\r\n\r\n\tdef _get_table(self):\r\n\t\tif self.parent and isinstance(self.parent,Table):\r\n\t\t\tself.table=self.parent\r\n\t\t\treturn self.table\r\n\r\n\tdef _get_tableID(self):\r\n\t\treturn self.table.tableID\r\n\r\n\tdef _get_rowNumber(self):\r\n\t\treturn self.table._jabTableInfo.jabTable.getAccessibleTableRow(self.indexInParent)+1\r\n\r\n\tdef _get_columnNumber(self):\r\n\t\treturn self.table._jabTableInfo.jabTable.getAccessibleTableColumn(self.indexInParent)+1\r\n\r\n\tdef _get_rowHeaderText(self):\r\n\t\theaderTableInfo=self.table.jabContext.getAccessibleTableRowHeader()\r\n\t\tif headerTableInfo and headerTableInfo.jabTable:\r\n\t\t\ttextList=[]\r\n\t\t\trow=self.rowNumber-1\r\n\t\t\tfor col in range(headerTableInfo.columnCount):\r\n\t\t\t\tcellInfo=headerTableInfo.jabTable.getAccessibleTableCellInfo(row,col)\r\n\t\t\t\tif cellInfo and cellInfo.jabContext:\r\n\t\t\t\t\tobj=JAB(jabContext=cellInfo.jabContext)\r\n\t\t\t\t\tif obj.name: textList.append(obj.name)\r\n\t\t\t\t\tif obj.description: textList.append(obj.description)\r\n\t\t\tjabContext=self.table._jabTableInfo.jabTable.getAccessibleTableRowDescription(row)\r\n\t\t\tif jabContext:\r\n\t\t\t\tobj=JAB(jabContext=jabContext)\r\n\t\t\t\tif obj.name: textList.append(obj.name)\r\n\t\t\t\tif obj.description: textList.append(obj.description)\r\n\t\t\treturn \" \".join(textList)\r\n\r\n\tdef _get_columnHeaderText(self):\r\n\t\theaderTableInfo=self.table.jabContext.getAccessibleTableColumnHeader()\r\n\t\tif headerTableInfo and headerTableInfo.jabTable:\r\n\t\t\ttextList=[]\r\n\t\t\tcol=self.columnNumber-1\r\n\t\t\tfor row in range(headerTableInfo.rowCount):\r\n\t\t\t\tcellInfo=headerTableInfo.jabTable.getAccessibleTableCellInfo(row,col)\r\n\t\t\t\tif cellInfo and cellInfo.jabContext:\r\n\t\t\t\t\tobj=JAB(jabContext=cellInfo.jabContext)\r\n\t\t\t\t\tif obj.name: textList.append(obj.name)\r\n\t\t\t\t\tif obj.description: textList.append(obj.description)\r\n\t\t\tjabContext=self.table._jabTableInfo.jabTable.getAccessibleTableColumnDescription(col)\r\n\t\t\tif jabContext:\r\n\t\t\t\tobj=JAB(jabContext=jabContext)\r\n\t\t\t\tif obj.name: textList.append(obj.name)\r\n\t\t\t\tif obj.description: textList.append(obj.description)\r\n\t\t\treturn \" \".join(textList)\r\n",
"path": "source/NVDAObjects/JAB/__init__.py"
}
] | diff --git a/source/NVDAObjects/JAB/__init__.py b/source/NVDAObjects/JAB/__init__.py
index f0abaf44d87..57f4ddc31a8 100644
--- a/source/NVDAObjects/JAB/__init__.py
+++ b/source/NVDAObjects/JAB/__init__.py
@@ -98,6 +98,7 @@
"multi_line":controlTypes.State.MULTILINE,
"focusable":controlTypes.State.FOCUSABLE,
"editable":controlTypes.State.EDITABLE,
+ "selectable": controlTypes.State.SELECTABLE,
}
diff --git a/user_docs/en/changes.t2t b/user_docs/en/changes.t2t
index dc4fc3e0b88..5e13e136538 100644
--- a/user_docs/en/changes.t2t
+++ b/user_docs/en/changes.t2t
@@ -20,6 +20,7 @@ What's New in NVDA
== Changes ==
- Updated Sonic rate boost library to commit ``1d70513``. (#14180)
- CLDR has been updated to version 42.0. (#14273)
+- Java applications with controls using the selectable state will now announce when an item is not selected rather than when the item is selected. (#14336)
-
|
jazzband__django-axes-1095 | FEATURE REQUEST: Logging without sensitive data by default (privacy by design)
Hi @aleksihakli
we found out that Axes logs the IP address and the username to the configured Django log on every failed attempt. The IP address is sensitive by definition, but the username is very often the user's email address, so it is even more sensitive.
I would love to see a solution where no sensitive information is logged unless you explicitly opt in and enable it.
So my suggestion:
* `AXES_VERBOSE` is set to `False` by default
* When verbose mode is off, the `get_client_str` method prints the user ID instead of the username. The ID helps you find the specific user just as well as the username does, but you avoid spilling personal data everywhere (e.g. into server or cloud logs). A `settings.py` sketch follows.
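A minimal sketch of those defaults; both settings already exist in `axes/conf.py`, and the values shown are the proposed ones, not the current behaviour:
```python
# settings.py -- sketch of the proposed privacy-by-design defaults.

# Do not write verbose client strings to the log unless explicitly enabled:
AXES_VERBOSE = False

# Mask identifying fields in login-attempt logging; today this defaults to []:
AXES_SENSITIVE_PARAMETERS = ["username", "ip_address"]
```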
What do you think? Would you be open for a PR?
Best
Ronny
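P.S. Until something like this lands, a project can already customize its own log lines via `AXES_CLIENT_STR_CALLABLE`, which `axes/conf.py` already reads. The parameter list below is an assumption, not taken from the axes docs, and should be verified against `axes.helpers.get_client_str` for the axes version in use:
```python
# settings.py
AXES_CLIENT_STR_CALLABLE = "myproject.utils.axes_client_str"

# myproject/utils.py -- hypothetical helper; the signature is assumed, so
# check it against your installed axes version before relying on it.
def axes_client_str(username, ip_address, user_agent, path_info, request):
    # Avoid logging the username and IP address; the request path is
    # usually enough to correlate log lines.
    return "client on {path}".format(path=path_info)
```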
| [
{
"content": "from django.conf import settings\nfrom django.utils.translation import gettext_lazy as _\n\n# disable plugin when set to False\nsettings.AXES_ENABLED = getattr(settings, \"AXES_ENABLED\", True)\n\n# see if the user has overridden the failure limit\nsettings.AXES_FAILURE_LIMIT = getattr(settings, \"AXES_FAILURE_LIMIT\", 3)\n\n# see if the user has set axes to lock out logins after failure limit\nsettings.AXES_LOCK_OUT_AT_FAILURE = getattr(settings, \"AXES_LOCK_OUT_AT_FAILURE\", True)\n\n# lockout parameters\n# default value will be [\"ip_address\"] after removing AXES_LOCK_OUT params support\nsettings.AXES_LOCKOUT_PARAMETERS = getattr(settings, \"AXES_LOCKOUT_PARAMETERS\", None)\n\n# TODO: remove it in future versions\nif settings.AXES_LOCKOUT_PARAMETERS is None:\n if getattr(settings, \"AXES_ONLY_USER_FAILURES\", False):\n settings.AXES_LOCKOUT_PARAMETERS = [\"username\"]\n else:\n if getattr(settings, \"AXES_LOCK_OUT_BY_USER_OR_IP\", False):\n settings.AXES_LOCKOUT_PARAMETERS = [\"username\", \"ip_address\"]\n elif getattr(settings, \"AXES_LOCK_OUT_BY_COMBINATION_USER_AND_IP\", False):\n settings.AXES_LOCKOUT_PARAMETERS = [[\"username\", \"ip_address\"]]\n else:\n settings.AXES_LOCKOUT_PARAMETERS = [\"ip_address\"]\n\n if getattr(settings, \"AXES_USE_USER_AGENT\", False):\n if isinstance(settings.AXES_LOCKOUT_PARAMETERS[0], str):\n settings.AXES_LOCKOUT_PARAMETERS[0] = [\n settings.AXES_LOCKOUT_PARAMETERS[0],\n \"user_agent\",\n ]\n else:\n settings.AXES_LOCKOUT_PARAMETERS[0].append(\"user_agent\")\n\n# lock out just for admin site\nsettings.AXES_ONLY_ADMIN_SITE = getattr(settings, \"AXES_ONLY_ADMIN_SITE\", False)\n\n# show Axes logs in admin\nsettings.AXES_ENABLE_ADMIN = getattr(settings, \"AXES_ENABLE_ADMIN\", True)\n\n# use a specific username field to retrieve from login POST data\nsettings.AXES_USERNAME_FORM_FIELD = getattr(\n settings, \"AXES_USERNAME_FORM_FIELD\", \"username\"\n)\n\n# use a specific password field to retrieve from login POST data\nsettings.AXES_PASSWORD_FORM_FIELD = getattr(\n settings, \"AXES_PASSWORD_FORM_FIELD\", \"password\"\n) # noqa\n\n# use a provided callable to transform the POSTed username into the one used in credentials\nsettings.AXES_USERNAME_CALLABLE = getattr(settings, \"AXES_USERNAME_CALLABLE\", None)\n\n# determine if given user should be always allowed to attempt authentication\nsettings.AXES_WHITELIST_CALLABLE = getattr(settings, \"AXES_WHITELIST_CALLABLE\", None)\n\n# return custom lockout response if configured\nsettings.AXES_LOCKOUT_CALLABLE = getattr(settings, \"AXES_LOCKOUT_CALLABLE\", None)\n\n# use a provided callable to get client ip address\nsettings.AXES_CLIENT_IP_CALLABLE = getattr(settings, \"AXES_CLIENT_IP_CALLABLE\", None)\n\n# reset the number of failed attempts after one successful attempt\nsettings.AXES_RESET_ON_SUCCESS = getattr(settings, \"AXES_RESET_ON_SUCCESS\", False)\n\nsettings.AXES_DISABLE_ACCESS_LOG = getattr(settings, \"AXES_DISABLE_ACCESS_LOG\", False)\n\nsettings.AXES_ENABLE_ACCESS_FAILURE_LOG = getattr(\n settings, \"AXES_ENABLE_ACCESS_FAILURE_LOG\", False\n)\n\nsettings.AXES_ACCESS_FAILURE_LOG_PER_USER_LIMIT = getattr(\n settings, \"AXES_ACCESS_FAILURE_LOG_PER_USER_LIMIT\", 1000\n)\n\nsettings.AXES_HANDLER = getattr(\n settings, \"AXES_HANDLER\", \"axes.handlers.database.AxesDatabaseHandler\"\n)\n\nsettings.AXES_LOCKOUT_TEMPLATE = getattr(settings, \"AXES_LOCKOUT_TEMPLATE\", None)\n\nsettings.AXES_LOCKOUT_URL = getattr(settings, \"AXES_LOCKOUT_URL\", None)\n\nsettings.AXES_COOLOFF_TIME = 
getattr(settings, \"AXES_COOLOFF_TIME\", None)\n\nsettings.AXES_VERBOSE = getattr(settings, \"AXES_VERBOSE\", settings.AXES_ENABLED)\n\n# whitelist and blacklist\nsettings.AXES_NEVER_LOCKOUT_WHITELIST = getattr(\n settings, \"AXES_NEVER_LOCKOUT_WHITELIST\", False\n)\n\nsettings.AXES_NEVER_LOCKOUT_GET = getattr(settings, \"AXES_NEVER_LOCKOUT_GET\", False)\n\nsettings.AXES_ONLY_WHITELIST = getattr(settings, \"AXES_ONLY_WHITELIST\", False)\n\nsettings.AXES_IP_WHITELIST = getattr(settings, \"AXES_IP_WHITELIST\", None)\n\nsettings.AXES_IP_BLACKLIST = getattr(settings, \"AXES_IP_BLACKLIST\", None)\n\n# message to show when locked out and have cooloff enabled\nsettings.AXES_COOLOFF_MESSAGE = getattr(\n settings,\n \"AXES_COOLOFF_MESSAGE\",\n _(\"Account locked: too many login attempts. Please try again later.\"),\n)\n\n# message to show when locked out and have cooloff disabled\nsettings.AXES_PERMALOCK_MESSAGE = getattr(\n settings,\n \"AXES_PERMALOCK_MESSAGE\",\n _(\n \"Account locked: too many login attempts. Contact an admin to unlock your account.\"\n ),\n)\n\n# set CORS allowed origins when calling authentication over ajax\nsettings.AXES_ALLOWED_CORS_ORIGINS = getattr(settings, \"AXES_ALLOWED_CORS_ORIGINS\", \"*\")\n\n# set the list of sensitive parameters to cleanse from get/post data before logging\nsettings.AXES_SENSITIVE_PARAMETERS = getattr(\n settings,\n \"AXES_SENSITIVE_PARAMETERS\",\n [],\n)\n\n# set the callable for the readable string that can be used in\n# e.g. logging to distinguish client requests\nsettings.AXES_CLIENT_STR_CALLABLE = getattr(settings, \"AXES_CLIENT_STR_CALLABLE\", None)\n\n# set the HTTP response code given by too many requests\nsettings.AXES_HTTP_RESPONSE_CODE = getattr(settings, \"AXES_HTTP_RESPONSE_CODE\", 429)\n\n# If True, a failed login attempt during lockout will reset the cool off period\nsettings.AXES_RESET_COOL_OFF_ON_FAILURE_DURING_LOCKOUT = getattr(\n settings, \"AXES_RESET_COOL_OFF_ON_FAILURE_DURING_LOCKOUT\", True\n)\n\n\n###\n# django-ipware settings for client IP address calculation and proxy detection\n# there are old AXES_PROXY_ and AXES_META_ legacy keys present for backwards compatibility\n# see https://github.com/un33k/django-ipware for further details\n###\n\n# if your deployment is using reverse proxies, set this value to 'left-most' or 'right-most' per your configuration\nsettings.AXES_IPWARE_PROXY_ORDER = getattr(\n settings,\n \"AXES_IPWARE_PROXY_ORDER\",\n getattr(settings, \"AXES_PROXY_ORDER\", \"left-most\"),\n)\n\n# if your deployment is using reverse proxies, set this value to the number of proxies in front of Django\nsettings.AXES_IPWARE_PROXY_COUNT = getattr(\n settings,\n \"AXES_IPWARE_PROXY_COUNT\",\n getattr(settings, \"AXES_PROXY_COUNT\", None),\n)\n\n# if your deployment is using reverse proxies, set to your trusted proxy IP addresses prefixes if needed\nsettings.AXES_IPWARE_PROXY_TRUSTED_IPS = getattr(\n settings,\n \"AXES_IPWARE_PROXY_TRUSTED_IPS\",\n getattr(settings, \"AXES_PROXY_TRUSTED_IPS\", None),\n)\n\n# set to the names of request.META attributes that should be checked for the IP address of the client\n# if your deployment is using reverse proxies, ensure that the header attributes are securely set by the proxy\n# ensure that the client can not spoof the headers by setting them and sending them through the proxy\nsettings.AXES_IPWARE_META_PRECEDENCE_ORDER = getattr(\n settings,\n \"AXES_IPWARE_META_PRECEDENCE_ORDER\",\n getattr(\n settings,\n \"AXES_META_PRECEDENCE_ORDER\",\n getattr(settings, 
\"IPWARE_META_PRECEDENCE_ORDER\", (\"REMOTE_ADDR\",)),\n ),\n)\n",
"path": "axes/conf.py"
}
] | [
{
"content": "from django.conf import settings\nfrom django.utils.translation import gettext_lazy as _\n\n# disable plugin when set to False\nsettings.AXES_ENABLED = getattr(settings, \"AXES_ENABLED\", True)\n\n# see if the user has overridden the failure limit\nsettings.AXES_FAILURE_LIMIT = getattr(settings, \"AXES_FAILURE_LIMIT\", 3)\n\n# see if the user has set axes to lock out logins after failure limit\nsettings.AXES_LOCK_OUT_AT_FAILURE = getattr(settings, \"AXES_LOCK_OUT_AT_FAILURE\", True)\n\n# lockout parameters\n# default value will be [\"ip_address\"] after removing AXES_LOCK_OUT params support\nsettings.AXES_LOCKOUT_PARAMETERS = getattr(settings, \"AXES_LOCKOUT_PARAMETERS\", None)\n\n# TODO: remove it in future versions\nif settings.AXES_LOCKOUT_PARAMETERS is None:\n if getattr(settings, \"AXES_ONLY_USER_FAILURES\", False):\n settings.AXES_LOCKOUT_PARAMETERS = [\"username\"]\n else:\n if getattr(settings, \"AXES_LOCK_OUT_BY_USER_OR_IP\", False):\n settings.AXES_LOCKOUT_PARAMETERS = [\"username\", \"ip_address\"]\n elif getattr(settings, \"AXES_LOCK_OUT_BY_COMBINATION_USER_AND_IP\", False):\n settings.AXES_LOCKOUT_PARAMETERS = [[\"username\", \"ip_address\"]]\n else:\n settings.AXES_LOCKOUT_PARAMETERS = [\"ip_address\"]\n\n if getattr(settings, \"AXES_USE_USER_AGENT\", False):\n if isinstance(settings.AXES_LOCKOUT_PARAMETERS[0], str):\n settings.AXES_LOCKOUT_PARAMETERS[0] = [\n settings.AXES_LOCKOUT_PARAMETERS[0],\n \"user_agent\",\n ]\n else:\n settings.AXES_LOCKOUT_PARAMETERS[0].append(\"user_agent\")\n\n# lock out just for admin site\nsettings.AXES_ONLY_ADMIN_SITE = getattr(settings, \"AXES_ONLY_ADMIN_SITE\", False)\n\n# show Axes logs in admin\nsettings.AXES_ENABLE_ADMIN = getattr(settings, \"AXES_ENABLE_ADMIN\", True)\n\n# use a specific username field to retrieve from login POST data\nsettings.AXES_USERNAME_FORM_FIELD = getattr(\n settings, \"AXES_USERNAME_FORM_FIELD\", \"username\"\n)\n\n# use a specific password field to retrieve from login POST data\nsettings.AXES_PASSWORD_FORM_FIELD = getattr(\n settings, \"AXES_PASSWORD_FORM_FIELD\", \"password\"\n) # noqa\n\n# use a provided callable to transform the POSTed username into the one used in credentials\nsettings.AXES_USERNAME_CALLABLE = getattr(settings, \"AXES_USERNAME_CALLABLE\", None)\n\n# determine if given user should be always allowed to attempt authentication\nsettings.AXES_WHITELIST_CALLABLE = getattr(settings, \"AXES_WHITELIST_CALLABLE\", None)\n\n# return custom lockout response if configured\nsettings.AXES_LOCKOUT_CALLABLE = getattr(settings, \"AXES_LOCKOUT_CALLABLE\", None)\n\n# use a provided callable to get client ip address\nsettings.AXES_CLIENT_IP_CALLABLE = getattr(settings, \"AXES_CLIENT_IP_CALLABLE\", None)\n\n# reset the number of failed attempts after one successful attempt\nsettings.AXES_RESET_ON_SUCCESS = getattr(settings, \"AXES_RESET_ON_SUCCESS\", False)\n\nsettings.AXES_DISABLE_ACCESS_LOG = getattr(settings, \"AXES_DISABLE_ACCESS_LOG\", False)\n\nsettings.AXES_ENABLE_ACCESS_FAILURE_LOG = getattr(\n settings, \"AXES_ENABLE_ACCESS_FAILURE_LOG\", False\n)\n\nsettings.AXES_ACCESS_FAILURE_LOG_PER_USER_LIMIT = getattr(\n settings, \"AXES_ACCESS_FAILURE_LOG_PER_USER_LIMIT\", 1000\n)\n\nsettings.AXES_HANDLER = getattr(\n settings, \"AXES_HANDLER\", \"axes.handlers.database.AxesDatabaseHandler\"\n)\n\nsettings.AXES_LOCKOUT_TEMPLATE = getattr(settings, \"AXES_LOCKOUT_TEMPLATE\", None)\n\nsettings.AXES_LOCKOUT_URL = getattr(settings, \"AXES_LOCKOUT_URL\", None)\n\nsettings.AXES_COOLOFF_TIME = 
getattr(settings, \"AXES_COOLOFF_TIME\", None)\n\nsettings.AXES_VERBOSE = getattr(settings, \"AXES_VERBOSE\", settings.AXES_ENABLED)\n\n# whitelist and blacklist\nsettings.AXES_NEVER_LOCKOUT_WHITELIST = getattr(\n settings, \"AXES_NEVER_LOCKOUT_WHITELIST\", False\n)\n\nsettings.AXES_NEVER_LOCKOUT_GET = getattr(settings, \"AXES_NEVER_LOCKOUT_GET\", False)\n\nsettings.AXES_ONLY_WHITELIST = getattr(settings, \"AXES_ONLY_WHITELIST\", False)\n\nsettings.AXES_IP_WHITELIST = getattr(settings, \"AXES_IP_WHITELIST\", None)\n\nsettings.AXES_IP_BLACKLIST = getattr(settings, \"AXES_IP_BLACKLIST\", None)\n\n# message to show when locked out and have cooloff enabled\nsettings.AXES_COOLOFF_MESSAGE = getattr(\n settings,\n \"AXES_COOLOFF_MESSAGE\",\n _(\"Account locked: too many login attempts. Please try again later.\"),\n)\n\n# message to show when locked out and have cooloff disabled\nsettings.AXES_PERMALOCK_MESSAGE = getattr(\n settings,\n \"AXES_PERMALOCK_MESSAGE\",\n _(\n \"Account locked: too many login attempts. Contact an admin to unlock your account.\"\n ),\n)\n\n# set CORS allowed origins when calling authentication over ajax\nsettings.AXES_ALLOWED_CORS_ORIGINS = getattr(settings, \"AXES_ALLOWED_CORS_ORIGINS\", \"*\")\n\n# set the list of sensitive parameters to cleanse from get/post data before logging\nsettings.AXES_SENSITIVE_PARAMETERS = getattr(\n settings,\n \"AXES_SENSITIVE_PARAMETERS\",\n [\"username\", \"ip_address\"],\n)\n\n# set the callable for the readable string that can be used in\n# e.g. logging to distinguish client requests\nsettings.AXES_CLIENT_STR_CALLABLE = getattr(settings, \"AXES_CLIENT_STR_CALLABLE\", None)\n\n# set the HTTP response code given by too many requests\nsettings.AXES_HTTP_RESPONSE_CODE = getattr(settings, \"AXES_HTTP_RESPONSE_CODE\", 429)\n\n# If True, a failed login attempt during lockout will reset the cool off period\nsettings.AXES_RESET_COOL_OFF_ON_FAILURE_DURING_LOCKOUT = getattr(\n settings, \"AXES_RESET_COOL_OFF_ON_FAILURE_DURING_LOCKOUT\", True\n)\n\n\n###\n# django-ipware settings for client IP address calculation and proxy detection\n# there are old AXES_PROXY_ and AXES_META_ legacy keys present for backwards compatibility\n# see https://github.com/un33k/django-ipware for further details\n###\n\n# if your deployment is using reverse proxies, set this value to 'left-most' or 'right-most' per your configuration\nsettings.AXES_IPWARE_PROXY_ORDER = getattr(\n settings,\n \"AXES_IPWARE_PROXY_ORDER\",\n getattr(settings, \"AXES_PROXY_ORDER\", \"left-most\"),\n)\n\n# if your deployment is using reverse proxies, set this value to the number of proxies in front of Django\nsettings.AXES_IPWARE_PROXY_COUNT = getattr(\n settings,\n \"AXES_IPWARE_PROXY_COUNT\",\n getattr(settings, \"AXES_PROXY_COUNT\", None),\n)\n\n# if your deployment is using reverse proxies, set to your trusted proxy IP addresses prefixes if needed\nsettings.AXES_IPWARE_PROXY_TRUSTED_IPS = getattr(\n settings,\n \"AXES_IPWARE_PROXY_TRUSTED_IPS\",\n getattr(settings, \"AXES_PROXY_TRUSTED_IPS\", None),\n)\n\n# set to the names of request.META attributes that should be checked for the IP address of the client\n# if your deployment is using reverse proxies, ensure that the header attributes are securely set by the proxy\n# ensure that the client can not spoof the headers by setting them and sending them through the proxy\nsettings.AXES_IPWARE_META_PRECEDENCE_ORDER = getattr(\n settings,\n \"AXES_IPWARE_META_PRECEDENCE_ORDER\",\n getattr(\n settings,\n \"AXES_META_PRECEDENCE_ORDER\",\n 
getattr(settings, \"IPWARE_META_PRECEDENCE_ORDER\", (\"REMOTE_ADDR\",)),\n ),\n)\n",
"path": "axes/conf.py"
}
] | diff --git a/axes/conf.py b/axes/conf.py
index daf1c70b..27514459 100644
--- a/axes/conf.py
+++ b/axes/conf.py
@@ -124,7 +124,7 @@
settings.AXES_SENSITIVE_PARAMETERS = getattr(
settings,
"AXES_SENSITIVE_PARAMETERS",
- [],
+ ["username", "ip_address"],
)
# set the callable for the readable string that can be used in
diff --git a/docs/4_configuration.rst b/docs/4_configuration.rst
index a2f3abe6..f64320e4 100644
--- a/docs/4_configuration.rst
+++ b/docs/4_configuration.rst
@@ -59,7 +59,7 @@ The following ``settings.py`` options are available for customizing Axes behavio
+------------------------------------------------------+----------------------------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
| AXES_PASSWORD_FORM_FIELD | 'password' | The name of the form or credentials field that contains your users password. |
+------------------------------------------------------+----------------------------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-| AXES_SENSITIVE_PARAMETERS | [] | Configures POST and GET parameter values (in addition to the value of ``AXES_PASSWORD_FORM_FIELD``) to mask in login attempt logging. |
+| AXES_SENSITIVE_PARAMETERS | ["username", "ip_address"] | Configures POST and GET parameter values (in addition to the value of ``AXES_PASSWORD_FORM_FIELD``) to mask in login attempt logging. Defaults enable privacy-by-design. |
+------------------------------------------------------+----------------------------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
| AXES_NEVER_LOCKOUT_GET | False | If ``True``, Axes will never lock out HTTP GET requests. |
+------------------------------------------------------+----------------------------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
diff --git a/tests/test_helpers.py b/tests/test_helpers.py
index 60f91281..0201fbba 100644
--- a/tests/test_helpers.py
+++ b/tests/test_helpers.py
@@ -82,6 +82,7 @@ def test_iso8601(self):
self.assertEqual(get_cool_off_iso8601(delta), iso_duration)
+@override_settings(AXES_SENSITIVE_PARAMETERS=[])
class ClientStringTestCase(AxesTestCase):
@staticmethod
def get_expected_client_str(*args, **kwargs):
@@ -1020,6 +1021,7 @@ def setUp(self):
"other_sensitive_data": "sensitive",
}
+ @override_settings(AXES_SENSITIVE_PARAMETERS=[])
def test_cleanse_parameters(self):
cleansed = cleanse_parameters(self.parameters)
self.assertEqual("test_user", cleansed["username"])
@@ -1041,6 +1043,7 @@ def test_cleanse_parameters_override_both(self):
self.assertEqual("********************", cleansed["password"])
self.assertEqual("********************", cleansed["other_sensitive_data"])
+ @override_settings(AXES_SENSITIVE_PARAMETERS=[])
@override_settings(AXES_PASSWORD_FORM_FIELD=None)
def test_cleanse_parameters_override_empty(self):
cleansed = cleanse_parameters(self.parameters)
|
beeware__toga-2582 | Dialog windows are not modal
### Describe the bug
Create a dialog like this:
```
async def on_button_test(widget):
    await self.main_window.info_dialog(title="Dialog", message="An Info Dialog")
```
You can now click on the main window behind the dialog, and the main window is activated even though the dialog is still floating above it. This allows the dialog to be opened again, or other actions to be triggered, although the user should be blocked until the dialog is dismissed.
Either change the behavior of dialogs to be modal by default, or add a `modal={True|False}` parameter to `info_dialog` (and all similar functions) to enforce this; a GTK-side sketch follows.
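For reference, on the GTK backend the fix could be as small as marking the native dialog modal right after creating it. This is only a sketch against `MessageDialog.__init__` in `toga_gtk/dialogs.py`; `Gtk.Window.set_modal` asks the window manager to block the transient parent:
```python
# toga_gtk/dialogs.py -- sketch: inside MessageDialog.__init__
self.native = Gtk.MessageDialog(
    transient_for=interface.window._impl.native,
    flags=0,
    message_type=message_type,
    buttons=buttons,
    text=title,
)
# Block interaction with the transient parent (the main window)
# while the dialog is open.
self.native.set_modal(True)
```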
### Steps to reproduce
1) Create self.main_window.info_dialog
2) Click main window
3) Main window is activated and can be interacted with.
### Expected behavior
Main window can not be activated (dialog keeps focus)
### Screenshots
_No response_
### Environment
Linux (Gentoo). Toga in a virtual environment.
### Logs
_No response_
### Additional context
_No response_
| [
{
"content": "from abc import ABC\nfrom pathlib import Path\n\nfrom .libs import Gtk\n\n\nclass BaseDialog(ABC):\n def __init__(self, interface):\n self.interface = interface\n self.interface._impl = self\n\n\nclass MessageDialog(BaseDialog):\n def __init__(\n self,\n interface,\n title,\n message_type,\n buttons,\n success_result=None,\n **kwargs,\n ):\n super().__init__(interface=interface)\n self.success_result = success_result\n\n self.native = Gtk.MessageDialog(\n transient_for=interface.window._impl.native,\n flags=0,\n message_type=message_type,\n buttons=buttons,\n text=title,\n )\n self.build_dialog(**kwargs)\n\n self.native.connect(\"response\", self.gtk_response)\n self.native.show()\n\n def build_dialog(self, message):\n self.native.format_secondary_text(message)\n\n def gtk_response(self, dialog, response):\n if self.success_result:\n result = response == self.success_result\n else:\n result = None\n\n self.interface.set_result(result)\n\n self.native.destroy()\n\n\nclass InfoDialog(MessageDialog):\n def __init__(self, interface, title, message):\n super().__init__(\n interface=interface,\n title=title,\n message=message,\n message_type=Gtk.MessageType.INFO,\n buttons=Gtk.ButtonsType.OK,\n )\n\n\nclass QuestionDialog(MessageDialog):\n def __init__(self, interface, title, message):\n super().__init__(\n interface=interface,\n title=title,\n message=message,\n message_type=Gtk.MessageType.QUESTION,\n buttons=Gtk.ButtonsType.YES_NO,\n success_result=Gtk.ResponseType.YES,\n )\n\n\nclass ConfirmDialog(MessageDialog):\n def __init__(self, interface, title, message):\n super().__init__(\n interface=interface,\n title=title,\n message=message,\n message_type=Gtk.MessageType.WARNING,\n buttons=Gtk.ButtonsType.OK_CANCEL,\n success_result=Gtk.ResponseType.OK,\n )\n\n\nclass ErrorDialog(MessageDialog):\n def __init__(self, interface, title, message):\n super().__init__(\n interface=interface,\n title=title,\n message=message,\n message_type=Gtk.MessageType.ERROR,\n buttons=Gtk.ButtonsType.CANCEL,\n )\n\n\nclass StackTraceDialog(MessageDialog):\n def __init__(self, interface, title, **kwargs):\n super().__init__(\n interface=interface,\n title=title,\n message_type=Gtk.MessageType.ERROR,\n buttons=(\n Gtk.ButtonsType.CANCEL if kwargs.get(\"retry\") else Gtk.ButtonsType.OK\n ),\n success_result=Gtk.ResponseType.OK if kwargs.get(\"retry\") else None,\n **kwargs,\n )\n\n def build_dialog(self, message, content, retry):\n container = self.native.get_message_area()\n\n self.native.format_secondary_text(message)\n\n # Create a scrolling readonly text area, in monospace font, to contain the stack trace.\n buffer = Gtk.TextBuffer()\n buffer.set_text(content)\n\n trace = Gtk.TextView()\n trace.set_buffer(buffer)\n trace.set_wrap_mode(Gtk.WrapMode.WORD_CHAR)\n trace.set_property(\"editable\", False)\n trace.set_property(\"cursor-visible\", False)\n\n trace.get_style_context().add_class(\"toga\")\n trace.get_style_context().add_class(\"stacktrace\")\n trace.get_style_context().add_class(\"dialog\")\n\n style_provider = Gtk.CssProvider()\n style_provider.load_from_data(b\".toga.stacktrace {font-family: monospace;}\")\n\n trace.get_style_context().add_provider(\n style_provider,\n Gtk.STYLE_PROVIDER_PRIORITY_APPLICATION,\n )\n\n scroll = Gtk.ScrolledWindow()\n scroll.set_policy(Gtk.PolicyType.NEVER, Gtk.PolicyType.AUTOMATIC)\n scroll.set_size_request(500, 200)\n scroll.add(trace)\n\n container.pack_end(scroll, False, False, 0)\n\n container.show_all()\n\n # If this is a retry dialog, add a retry button 
(which maps to OK).\n if retry:\n self.native.add_button(\"Retry\", Gtk.ResponseType.OK)\n\n\nclass FileDialog(BaseDialog):\n def __init__(\n self,\n interface,\n title,\n filename,\n initial_directory,\n file_types,\n multiple_select,\n action,\n ok_icon,\n ):\n super().__init__(interface=interface)\n\n self.native = Gtk.FileChooserDialog(\n transient_for=interface.window._impl.native,\n title=title,\n action=action,\n )\n self.native.add_button(Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL)\n self.native.add_button(ok_icon, Gtk.ResponseType.OK)\n\n if filename:\n self.native.set_current_name(filename)\n\n if initial_directory:\n self.native.set_current_folder(str(initial_directory))\n\n if file_types:\n for file_type in file_types:\n filter_filetype = Gtk.FileFilter()\n filter_filetype.set_name(\".\" + file_type + \" files\")\n filter_filetype.add_pattern(\"*.\" + file_type)\n self.native.add_filter(filter_filetype)\n\n self.multiple_select = multiple_select\n if self.multiple_select:\n self.native.set_select_multiple(True)\n\n self.native.connect(\"response\", self.gtk_response)\n self.native.show()\n\n # Provided as a stub that can be mocked in test conditions\n def selected_path(self):\n return self.native.get_filename()\n\n # Provided as a stub that can be mocked in test conditions\n def selected_paths(self):\n return self.native.get_filenames()\n\n def gtk_response(self, dialog, response):\n if response == Gtk.ResponseType.OK:\n if self.multiple_select:\n result = [Path(filename) for filename in self.selected_paths()]\n else:\n result = Path(self.selected_path())\n else:\n result = None\n\n self.interface.set_result(result)\n\n self.native.destroy()\n\n\nclass SaveFileDialog(FileDialog):\n def __init__(\n self,\n interface,\n title,\n filename,\n initial_directory,\n file_types=None,\n ):\n super().__init__(\n interface=interface,\n title=title,\n filename=filename,\n initial_directory=initial_directory,\n file_types=file_types,\n multiple_select=False,\n action=Gtk.FileChooserAction.SAVE,\n ok_icon=Gtk.STOCK_SAVE,\n )\n\n\nclass OpenFileDialog(FileDialog):\n def __init__(\n self,\n interface,\n title,\n initial_directory,\n file_types,\n multiple_select,\n ):\n super().__init__(\n interface=interface,\n title=title,\n filename=None,\n initial_directory=initial_directory,\n file_types=file_types,\n multiple_select=multiple_select,\n action=Gtk.FileChooserAction.OPEN,\n ok_icon=Gtk.STOCK_OPEN,\n )\n\n\nclass SelectFolderDialog(FileDialog):\n def __init__(\n self,\n interface,\n title,\n initial_directory,\n multiple_select,\n ):\n super().__init__(\n interface=interface,\n title=title,\n filename=None,\n initial_directory=initial_directory,\n file_types=None,\n multiple_select=multiple_select,\n action=Gtk.FileChooserAction.SELECT_FOLDER,\n ok_icon=Gtk.STOCK_OPEN,\n )\n",
"path": "gtk/src/toga_gtk/dialogs.py"
}
] | [
{
"content": "from abc import ABC\nfrom pathlib import Path\n\nfrom .libs import Gtk\n\n\nclass BaseDialog(ABC):\n def __init__(self, interface):\n self.interface = interface\n self.interface._impl = self\n\n\nclass MessageDialog(BaseDialog):\n def __init__(\n self,\n interface,\n title,\n message_type,\n buttons,\n success_result=None,\n **kwargs,\n ):\n super().__init__(interface=interface)\n self.success_result = success_result\n\n self.native = Gtk.MessageDialog(\n transient_for=interface.window._impl.native,\n flags=0,\n message_type=message_type,\n buttons=buttons,\n text=title,\n )\n self.native.set_modal(True)\n self.build_dialog(**kwargs)\n\n self.native.connect(\"response\", self.gtk_response)\n self.native.show()\n\n def build_dialog(self, message):\n self.native.format_secondary_text(message)\n\n def gtk_response(self, dialog, response):\n if self.success_result:\n result = response == self.success_result\n else:\n result = None\n\n self.interface.set_result(result)\n\n self.native.destroy()\n\n\nclass InfoDialog(MessageDialog):\n def __init__(self, interface, title, message):\n super().__init__(\n interface=interface,\n title=title,\n message=message,\n message_type=Gtk.MessageType.INFO,\n buttons=Gtk.ButtonsType.OK,\n )\n\n\nclass QuestionDialog(MessageDialog):\n def __init__(self, interface, title, message):\n super().__init__(\n interface=interface,\n title=title,\n message=message,\n message_type=Gtk.MessageType.QUESTION,\n buttons=Gtk.ButtonsType.YES_NO,\n success_result=Gtk.ResponseType.YES,\n )\n\n\nclass ConfirmDialog(MessageDialog):\n def __init__(self, interface, title, message):\n super().__init__(\n interface=interface,\n title=title,\n message=message,\n message_type=Gtk.MessageType.WARNING,\n buttons=Gtk.ButtonsType.OK_CANCEL,\n success_result=Gtk.ResponseType.OK,\n )\n\n\nclass ErrorDialog(MessageDialog):\n def __init__(self, interface, title, message):\n super().__init__(\n interface=interface,\n title=title,\n message=message,\n message_type=Gtk.MessageType.ERROR,\n buttons=Gtk.ButtonsType.CANCEL,\n )\n\n\nclass StackTraceDialog(MessageDialog):\n def __init__(self, interface, title, **kwargs):\n super().__init__(\n interface=interface,\n title=title,\n message_type=Gtk.MessageType.ERROR,\n buttons=(\n Gtk.ButtonsType.CANCEL if kwargs.get(\"retry\") else Gtk.ButtonsType.OK\n ),\n success_result=Gtk.ResponseType.OK if kwargs.get(\"retry\") else None,\n **kwargs,\n )\n\n def build_dialog(self, message, content, retry):\n container = self.native.get_message_area()\n\n self.native.format_secondary_text(message)\n\n # Create a scrolling readonly text area, in monospace font, to contain the stack trace.\n buffer = Gtk.TextBuffer()\n buffer.set_text(content)\n\n trace = Gtk.TextView()\n trace.set_buffer(buffer)\n trace.set_wrap_mode(Gtk.WrapMode.WORD_CHAR)\n trace.set_property(\"editable\", False)\n trace.set_property(\"cursor-visible\", False)\n\n trace.get_style_context().add_class(\"toga\")\n trace.get_style_context().add_class(\"stacktrace\")\n trace.get_style_context().add_class(\"dialog\")\n\n style_provider = Gtk.CssProvider()\n style_provider.load_from_data(b\".toga.stacktrace {font-family: monospace;}\")\n\n trace.get_style_context().add_provider(\n style_provider,\n Gtk.STYLE_PROVIDER_PRIORITY_APPLICATION,\n )\n\n scroll = Gtk.ScrolledWindow()\n scroll.set_policy(Gtk.PolicyType.NEVER, Gtk.PolicyType.AUTOMATIC)\n scroll.set_size_request(500, 200)\n scroll.add(trace)\n\n container.pack_end(scroll, False, False, 0)\n\n container.show_all()\n\n # If this is a 
retry dialog, add a retry button (which maps to OK).\n if retry:\n self.native.add_button(\"Retry\", Gtk.ResponseType.OK)\n\n\nclass FileDialog(BaseDialog):\n def __init__(\n self,\n interface,\n title,\n filename,\n initial_directory,\n file_types,\n multiple_select,\n action,\n ok_icon,\n ):\n super().__init__(interface=interface)\n\n self.native = Gtk.FileChooserDialog(\n transient_for=interface.window._impl.native,\n title=title,\n action=action,\n )\n self.native.add_button(Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL)\n self.native.add_button(ok_icon, Gtk.ResponseType.OK)\n\n if filename:\n self.native.set_current_name(filename)\n\n if initial_directory:\n self.native.set_current_folder(str(initial_directory))\n\n if file_types:\n for file_type in file_types:\n filter_filetype = Gtk.FileFilter()\n filter_filetype.set_name(\".\" + file_type + \" files\")\n filter_filetype.add_pattern(\"*.\" + file_type)\n self.native.add_filter(filter_filetype)\n\n self.multiple_select = multiple_select\n if self.multiple_select:\n self.native.set_select_multiple(True)\n\n self.native.connect(\"response\", self.gtk_response)\n self.native.show()\n\n # Provided as a stub that can be mocked in test conditions\n def selected_path(self):\n return self.native.get_filename()\n\n # Provided as a stub that can be mocked in test conditions\n def selected_paths(self):\n return self.native.get_filenames()\n\n def gtk_response(self, dialog, response):\n if response == Gtk.ResponseType.OK:\n if self.multiple_select:\n result = [Path(filename) for filename in self.selected_paths()]\n else:\n result = Path(self.selected_path())\n else:\n result = None\n\n self.interface.set_result(result)\n\n self.native.destroy()\n\n\nclass SaveFileDialog(FileDialog):\n def __init__(\n self,\n interface,\n title,\n filename,\n initial_directory,\n file_types=None,\n ):\n super().__init__(\n interface=interface,\n title=title,\n filename=filename,\n initial_directory=initial_directory,\n file_types=file_types,\n multiple_select=False,\n action=Gtk.FileChooserAction.SAVE,\n ok_icon=Gtk.STOCK_SAVE,\n )\n\n\nclass OpenFileDialog(FileDialog):\n def __init__(\n self,\n interface,\n title,\n initial_directory,\n file_types,\n multiple_select,\n ):\n super().__init__(\n interface=interface,\n title=title,\n filename=None,\n initial_directory=initial_directory,\n file_types=file_types,\n multiple_select=multiple_select,\n action=Gtk.FileChooserAction.OPEN,\n ok_icon=Gtk.STOCK_OPEN,\n )\n\n\nclass SelectFolderDialog(FileDialog):\n def __init__(\n self,\n interface,\n title,\n initial_directory,\n multiple_select,\n ):\n super().__init__(\n interface=interface,\n title=title,\n filename=None,\n initial_directory=initial_directory,\n file_types=None,\n multiple_select=multiple_select,\n action=Gtk.FileChooserAction.SELECT_FOLDER,\n ok_icon=Gtk.STOCK_OPEN,\n )\n",
"path": "gtk/src/toga_gtk/dialogs.py"
}
] | diff --git a/android/tests_backend/window.py b/android/tests_backend/window.py
index bc283f43a3..8b0d05c408 100644
--- a/android/tests_backend/window.py
+++ b/android/tests_backend/window.py
@@ -87,3 +87,6 @@ def assert_toolbar_item(self, index, label, tooltip, has_icon, enabled):
def press_toolbar_button(self, index):
self.native.onOptionsItemSelected(self._toolbar_items()[index])
+
+ def is_modal_dialog(self, dialog):
+ return True
diff --git a/changes/2446.bugfix.rst b/changes/2446.bugfix.rst
new file mode 100644
index 0000000000..34088215a5
--- /dev/null
+++ b/changes/2446.bugfix.rst
@@ -0,0 +1 @@
+Dialog windows are now properly modal when using the Gtk+ backend.
diff --git a/cocoa/tests_backend/window.py b/cocoa/tests_backend/window.py
index 9eeba9ff29..7c1e41c50c 100644
--- a/cocoa/tests_backend/window.py
+++ b/cocoa/tests_backend/window.py
@@ -261,3 +261,6 @@ def press_toolbar_button(self, index):
restype=None,
argtypes=[objc_id],
)
+
+ def is_modal_dialog(self, dialog):
+ return True
diff --git a/gtk/src/toga_gtk/dialogs.py b/gtk/src/toga_gtk/dialogs.py
index 274032c382..61bf9285e8 100644
--- a/gtk/src/toga_gtk/dialogs.py
+++ b/gtk/src/toga_gtk/dialogs.py
@@ -30,6 +30,7 @@ def __init__(
buttons=buttons,
text=title,
)
+ self.native.set_modal(True)
self.build_dialog(**kwargs)
self.native.connect("response", self.gtk_response)
diff --git a/gtk/tests_backend/window.py b/gtk/tests_backend/window.py
index a90b60f8ef..ed115e2577 100644
--- a/gtk/tests_backend/window.py
+++ b/gtk/tests_backend/window.py
@@ -251,3 +251,6 @@ def assert_toolbar_item(self, index, label, tooltip, has_icon, enabled):
def press_toolbar_button(self, index):
item = self.impl.native_toolbar.get_nth_item(index)
item.emit("clicked")
+
+ def is_modal_dialog(self, dialog):
+ return dialog.native.get_modal()
diff --git a/iOS/tests_backend/window.py b/iOS/tests_backend/window.py
index 08f9a34295..8e68cc9367 100644
--- a/iOS/tests_backend/window.py
+++ b/iOS/tests_backend/window.py
@@ -77,3 +77,6 @@ async def close_select_folder_dialog(self, dialog, result, multiple_select):
def has_toolbar(self):
pytest.skip("Toolbars not implemented on iOS")
+
+ def is_modal_dialog(self, dialog):
+ return True
diff --git a/testbed/tests/test_window.py b/testbed/tests/test_window.py
index 19d5a2fd95..d234a4c6e2 100644
--- a/testbed/tests/test_window.py
+++ b/testbed/tests/test_window.py
@@ -560,6 +560,7 @@ async def test_info_dialog(main_window, main_window_probe):
"Info", "Some info", on_result=on_result_handler
)
await main_window_probe.redraw("Info dialog displayed")
+ assert main_window_probe.is_modal_dialog(dialog_result._impl)
await main_window_probe.close_info_dialog(dialog_result._impl)
await assert_dialog_result(main_window, dialog_result, on_result_handler, None)
diff --git a/winforms/tests_backend/window.py b/winforms/tests_backend/window.py
index d7e8d7ac9c..55d6c4d63c 100644
--- a/winforms/tests_backend/window.py
+++ b/winforms/tests_backend/window.py
@@ -148,3 +148,6 @@ def assert_toolbar_item(self, index, label, tooltip, has_icon, enabled):
def press_toolbar_button(self, index):
self._native_toolbar_item(index).OnClick(EventArgs.Empty)
+
+ def is_modal_dialog(self, dialog):
+ return True
|
jazzband__pip-tools-808 | pip-compile replaces password in URL with ****
I am using Python 3.6.8, pip-tools 19.1 and setuptools 41.0.1 in a virtual environment.
For some days now, when I compile my requirements.in file, which includes a package specified via a git repo URL like:
```
...
-e git+http://user:[email protected]/scm/path/git-repo-name.git#egg=packagename
google-api-python-client
...
```
the password string in the URL is replaced with `****`:
```
...
-e git+http://user:****@myhost.com/scm/path/git-repo-name.git#egg=packagename
google-api-python-client==1.7.8
...
```
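The masking apparently comes from pip itself: recent pip versions redact the password in `Link`'s string representation, and pip-tools formats editable requirements with `str(ireq.link)`. Here is a sketch of `format_requirement` from `piptools/utils.py`, trimmed to the relevant branch (hash handling omitted) and switched to the raw `Link.url` attribute, which keeps the credentials:
```python
# piptools/utils.py -- trimmed sketch; hash handling omitted.
def format_requirement(ireq, marker=None):
    """Pretty-print an InstallRequirement for requirements.txt output."""
    if ireq.editable:
        # str(ireq.link) goes through pip's redacted representation and
        # masks the password; Link.url is the original, unredacted string.
        line = "-e {}".format(ireq.link.url)
    else:
        line = str(ireq.req).lower()
    if marker:
        line = "{} ; {}".format(line, marker)
    return line
```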
Is there a way to prevent this behaviour?
Regards,
Josef
| [
{
"content": "# coding: utf-8\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport sys\nfrom collections import OrderedDict\nfrom itertools import chain, groupby\n\nfrom ._compat import install_req_from_line\nfrom .click import style\n\nUNSAFE_PACKAGES = {\"setuptools\", \"distribute\", \"pip\"}\n\n\ndef key_from_ireq(ireq):\n \"\"\"Get a standardized key for an InstallRequirement.\"\"\"\n if ireq.req is None and ireq.link is not None:\n return str(ireq.link)\n else:\n return key_from_req(ireq.req)\n\n\ndef key_from_req(req):\n \"\"\"Get an all-lowercase version of the requirement's name.\"\"\"\n if hasattr(req, \"key\"):\n # from pkg_resources, such as installed dists for pip-sync\n key = req.key\n else:\n # from packaging, such as install requirements from requirements.txt\n key = req.name\n\n key = key.replace(\"_\", \"-\").lower()\n return key\n\n\ndef comment(text):\n return style(text, fg=\"green\")\n\n\ndef make_install_requirement(name, version, extras, constraint=False):\n # If no extras are specified, the extras string is blank\n extras_string = \"\"\n if extras:\n # Sort extras for stability\n extras_string = \"[{}]\".format(\",\".join(sorted(extras)))\n\n return install_req_from_line(\n str(\"{}{}=={}\".format(name, extras_string, version)), constraint=constraint\n )\n\n\ndef format_requirement(ireq, marker=None, hashes=None):\n \"\"\"\n Generic formatter for pretty printing InstallRequirements to the terminal\n in a less verbose way than using its `__str__` method.\n \"\"\"\n if ireq.editable:\n line = \"-e {}\".format(ireq.link)\n else:\n line = str(ireq.req).lower()\n\n if marker:\n line = \"{} ; {}\".format(line, marker)\n\n if hashes:\n for hash_ in sorted(hashes):\n line += \" \\\\\\n --hash={}\".format(hash_)\n\n return line\n\n\ndef format_specifier(ireq):\n \"\"\"\n Generic formatter for pretty printing the specifier part of\n InstallRequirements to the terminal.\n \"\"\"\n # TODO: Ideally, this is carried over to the pip library itself\n specs = ireq.specifier._specs if ireq.req is not None else []\n specs = sorted(specs, key=lambda x: x._spec[1])\n return \",\".join(str(s) for s in specs) or \"<any>\"\n\n\ndef is_pinned_requirement(ireq):\n \"\"\"\n Returns whether an InstallRequirement is a \"pinned\" requirement.\n\n An InstallRequirement is considered pinned if:\n\n - Is not editable\n - It has exactly one specifier\n - That specifier is \"==\"\n - The version does not contain a wildcard\n\n Examples:\n django==1.8 # pinned\n django>1.8 # NOT pinned\n django~=1.8 # NOT pinned\n django==1.* # NOT pinned\n \"\"\"\n if ireq.editable:\n return False\n\n if len(ireq.specifier._specs) != 1:\n return False\n\n op, version = next(iter(ireq.specifier._specs))._spec\n return (op == \"==\" or op == \"===\") and not version.endswith(\".*\")\n\n\ndef as_tuple(ireq):\n \"\"\"\n Pulls out the (name: str, version:str, extras:(str)) tuple from\n the pinned InstallRequirement.\n \"\"\"\n if not is_pinned_requirement(ireq):\n raise TypeError(\"Expected a pinned InstallRequirement, got {}\".format(ireq))\n\n name = key_from_req(ireq.req)\n version = next(iter(ireq.specifier._specs))._spec[1]\n extras = tuple(sorted(ireq.extras))\n return name, version, extras\n\n\ndef full_groupby(iterable, key=None):\n \"\"\"Like groupby(), but sorts the input on the group key first.\"\"\"\n return groupby(sorted(iterable, key=key), key=key)\n\n\ndef flat_map(fn, collection):\n \"\"\"Map a function over a collection and flatten the result by one-level\"\"\"\n 
return chain.from_iterable(map(fn, collection))\n\n\ndef lookup_table(values, key=None, keyval=None, unique=False, use_lists=False):\n \"\"\"\n Builds a dict-based lookup table (index) elegantly.\n\n Supports building normal and unique lookup tables. For example:\n\n >>> assert lookup_table(\n ... ['foo', 'bar', 'baz', 'qux', 'quux'], lambda s: s[0]) == {\n ... 'b': {'bar', 'baz'},\n ... 'f': {'foo'},\n ... 'q': {'quux', 'qux'}\n ... }\n\n For key functions that uniquely identify values, set unique=True:\n\n >>> assert lookup_table(\n ... ['foo', 'bar', 'baz', 'qux', 'quux'], lambda s: s[0],\n ... unique=True) == {\n ... 'b': 'baz',\n ... 'f': 'foo',\n ... 'q': 'quux'\n ... }\n\n For the values represented as lists, set use_lists=True:\n\n >>> assert lookup_table(\n ... ['foo', 'bar', 'baz', 'qux', 'quux'], lambda s: s[0],\n ... use_lists=True) == {\n ... 'b': ['bar', 'baz'],\n ... 'f': ['foo'],\n ... 'q': ['qux', 'quux']\n ... }\n\n The values of the resulting lookup table will be values, not sets.\n\n For extra power, you can even change the values while building up the LUT.\n To do so, use the `keyval` function instead of the `key` arg:\n\n >>> assert lookup_table(\n ... ['foo', 'bar', 'baz', 'qux', 'quux'],\n ... keyval=lambda s: (s[0], s[1:])) == {\n ... 'b': {'ar', 'az'},\n ... 'f': {'oo'},\n ... 'q': {'uux', 'ux'}\n ... }\n\n \"\"\"\n if keyval is None:\n if key is None:\n\n def keyval(v):\n return v\n\n else:\n\n def keyval(v):\n return (key(v), v)\n\n if unique:\n return dict(keyval(v) for v in values)\n\n lut = {}\n for value in values:\n k, v = keyval(value)\n try:\n s = lut[k]\n except KeyError:\n if use_lists:\n s = lut[k] = list()\n else:\n s = lut[k] = set()\n if use_lists:\n s.append(v)\n else:\n s.add(v)\n return dict(lut)\n\n\ndef dedup(iterable):\n \"\"\"Deduplicate an iterable object like iter(set(iterable)) but\n order-reserved.\n \"\"\"\n return iter(OrderedDict.fromkeys(iterable))\n\n\ndef name_from_req(req):\n \"\"\"Get the name of the requirement\"\"\"\n if hasattr(req, \"project_name\"):\n # from pkg_resources, such as installed dists for pip-sync\n return req.project_name\n else:\n # from packaging, such as install requirements from requirements.txt\n return req.name\n\n\ndef fs_str(string):\n \"\"\"\n Convert given string to a correctly encoded filesystem string.\n\n On Python 2, if the input string is unicode, converts it to bytes\n encoded with the filesystem encoding.\n\n On Python 3 returns the string as is, since Python 3 uses unicode\n paths and the input string shouldn't be bytes.\n\n :type string: str|unicode\n :rtype: str\n \"\"\"\n if isinstance(string, str):\n return string\n if isinstance(string, bytes):\n raise AssertionError\n return string.encode(_fs_encoding)\n\n\n_fs_encoding = sys.getfilesystemencoding() or sys.getdefaultencoding()\n\n\ndef get_hashes_from_ireq(ireq):\n \"\"\"\n Given an InstallRequirement, return a list of string hashes in\n the format \"{algorithm}:{hash}\". Return an empty list if there are no hashes\n in the requirement options.\n \"\"\"\n result = []\n ireq_hashes = ireq.options.get(\"hashes\", {})\n for algorithm, hexdigests in ireq_hashes.items():\n for hash_ in hexdigests:\n result.append(\"{}:{}\".format(algorithm, hash_))\n return result\n",
"path": "piptools/utils.py"
}
] | [
{
"content": "# coding: utf-8\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport sys\nfrom collections import OrderedDict\nfrom itertools import chain, groupby\n\nfrom ._compat import install_req_from_line\nfrom .click import style\n\nUNSAFE_PACKAGES = {\"setuptools\", \"distribute\", \"pip\"}\n\n\ndef key_from_ireq(ireq):\n \"\"\"Get a standardized key for an InstallRequirement.\"\"\"\n if ireq.req is None and ireq.link is not None:\n return str(ireq.link)\n else:\n return key_from_req(ireq.req)\n\n\ndef key_from_req(req):\n \"\"\"Get an all-lowercase version of the requirement's name.\"\"\"\n if hasattr(req, \"key\"):\n # from pkg_resources, such as installed dists for pip-sync\n key = req.key\n else:\n # from packaging, such as install requirements from requirements.txt\n key = req.name\n\n key = key.replace(\"_\", \"-\").lower()\n return key\n\n\ndef comment(text):\n return style(text, fg=\"green\")\n\n\ndef make_install_requirement(name, version, extras, constraint=False):\n # If no extras are specified, the extras string is blank\n extras_string = \"\"\n if extras:\n # Sort extras for stability\n extras_string = \"[{}]\".format(\",\".join(sorted(extras)))\n\n return install_req_from_line(\n str(\"{}{}=={}\".format(name, extras_string, version)), constraint=constraint\n )\n\n\ndef format_requirement(ireq, marker=None, hashes=None):\n \"\"\"\n Generic formatter for pretty printing InstallRequirements to the terminal\n in a less verbose way than using its `__str__` method.\n \"\"\"\n if ireq.editable:\n line = \"-e {}\".format(ireq.link.url)\n else:\n line = str(ireq.req).lower()\n\n if marker:\n line = \"{} ; {}\".format(line, marker)\n\n if hashes:\n for hash_ in sorted(hashes):\n line += \" \\\\\\n --hash={}\".format(hash_)\n\n return line\n\n\ndef format_specifier(ireq):\n \"\"\"\n Generic formatter for pretty printing the specifier part of\n InstallRequirements to the terminal.\n \"\"\"\n # TODO: Ideally, this is carried over to the pip library itself\n specs = ireq.specifier._specs if ireq.req is not None else []\n specs = sorted(specs, key=lambda x: x._spec[1])\n return \",\".join(str(s) for s in specs) or \"<any>\"\n\n\ndef is_pinned_requirement(ireq):\n \"\"\"\n Returns whether an InstallRequirement is a \"pinned\" requirement.\n\n An InstallRequirement is considered pinned if:\n\n - Is not editable\n - It has exactly one specifier\n - That specifier is \"==\"\n - The version does not contain a wildcard\n\n Examples:\n django==1.8 # pinned\n django>1.8 # NOT pinned\n django~=1.8 # NOT pinned\n django==1.* # NOT pinned\n \"\"\"\n if ireq.editable:\n return False\n\n if len(ireq.specifier._specs) != 1:\n return False\n\n op, version = next(iter(ireq.specifier._specs))._spec\n return (op == \"==\" or op == \"===\") and not version.endswith(\".*\")\n\n\ndef as_tuple(ireq):\n \"\"\"\n Pulls out the (name: str, version:str, extras:(str)) tuple from\n the pinned InstallRequirement.\n \"\"\"\n if not is_pinned_requirement(ireq):\n raise TypeError(\"Expected a pinned InstallRequirement, got {}\".format(ireq))\n\n name = key_from_req(ireq.req)\n version = next(iter(ireq.specifier._specs))._spec[1]\n extras = tuple(sorted(ireq.extras))\n return name, version, extras\n\n\ndef full_groupby(iterable, key=None):\n \"\"\"Like groupby(), but sorts the input on the group key first.\"\"\"\n return groupby(sorted(iterable, key=key), key=key)\n\n\ndef flat_map(fn, collection):\n \"\"\"Map a function over a collection and flatten the result by 
one-level\"\"\"\n return chain.from_iterable(map(fn, collection))\n\n\ndef lookup_table(values, key=None, keyval=None, unique=False, use_lists=False):\n \"\"\"\n Builds a dict-based lookup table (index) elegantly.\n\n Supports building normal and unique lookup tables. For example:\n\n >>> assert lookup_table(\n ... ['foo', 'bar', 'baz', 'qux', 'quux'], lambda s: s[0]) == {\n ... 'b': {'bar', 'baz'},\n ... 'f': {'foo'},\n ... 'q': {'quux', 'qux'}\n ... }\n\n For key functions that uniquely identify values, set unique=True:\n\n >>> assert lookup_table(\n ... ['foo', 'bar', 'baz', 'qux', 'quux'], lambda s: s[0],\n ... unique=True) == {\n ... 'b': 'baz',\n ... 'f': 'foo',\n ... 'q': 'quux'\n ... }\n\n For the values represented as lists, set use_lists=True:\n\n >>> assert lookup_table(\n ... ['foo', 'bar', 'baz', 'qux', 'quux'], lambda s: s[0],\n ... use_lists=True) == {\n ... 'b': ['bar', 'baz'],\n ... 'f': ['foo'],\n ... 'q': ['qux', 'quux']\n ... }\n\n The values of the resulting lookup table will be values, not sets.\n\n For extra power, you can even change the values while building up the LUT.\n To do so, use the `keyval` function instead of the `key` arg:\n\n >>> assert lookup_table(\n ... ['foo', 'bar', 'baz', 'qux', 'quux'],\n ... keyval=lambda s: (s[0], s[1:])) == {\n ... 'b': {'ar', 'az'},\n ... 'f': {'oo'},\n ... 'q': {'uux', 'ux'}\n ... }\n\n \"\"\"\n if keyval is None:\n if key is None:\n\n def keyval(v):\n return v\n\n else:\n\n def keyval(v):\n return (key(v), v)\n\n if unique:\n return dict(keyval(v) for v in values)\n\n lut = {}\n for value in values:\n k, v = keyval(value)\n try:\n s = lut[k]\n except KeyError:\n if use_lists:\n s = lut[k] = list()\n else:\n s = lut[k] = set()\n if use_lists:\n s.append(v)\n else:\n s.add(v)\n return dict(lut)\n\n\ndef dedup(iterable):\n \"\"\"Deduplicate an iterable object like iter(set(iterable)) but\n order-reserved.\n \"\"\"\n return iter(OrderedDict.fromkeys(iterable))\n\n\ndef name_from_req(req):\n \"\"\"Get the name of the requirement\"\"\"\n if hasattr(req, \"project_name\"):\n # from pkg_resources, such as installed dists for pip-sync\n return req.project_name\n else:\n # from packaging, such as install requirements from requirements.txt\n return req.name\n\n\ndef fs_str(string):\n \"\"\"\n Convert given string to a correctly encoded filesystem string.\n\n On Python 2, if the input string is unicode, converts it to bytes\n encoded with the filesystem encoding.\n\n On Python 3 returns the string as is, since Python 3 uses unicode\n paths and the input string shouldn't be bytes.\n\n :type string: str|unicode\n :rtype: str\n \"\"\"\n if isinstance(string, str):\n return string\n if isinstance(string, bytes):\n raise AssertionError\n return string.encode(_fs_encoding)\n\n\n_fs_encoding = sys.getfilesystemencoding() or sys.getdefaultencoding()\n\n\ndef get_hashes_from_ireq(ireq):\n \"\"\"\n Given an InstallRequirement, return a list of string hashes in\n the format \"{algorithm}:{hash}\". Return an empty list if there are no hashes\n in the requirement options.\n \"\"\"\n result = []\n ireq_hashes = ireq.options.get(\"hashes\", {})\n for algorithm, hexdigests in ireq_hashes.items():\n for hash_ in hexdigests:\n result.append(\"{}:{}\".format(algorithm, hash_))\n return result\n",
"path": "piptools/utils.py"
}
] | diff --git a/piptools/utils.py b/piptools/utils.py
index 6d159542f..085505df8 100644
--- a/piptools/utils.py
+++ b/piptools/utils.py
@@ -54,7 +54,7 @@ def format_requirement(ireq, marker=None, hashes=None):
in a less verbose way than using its `__str__` method.
"""
if ireq.editable:
- line = "-e {}".format(ireq.link)
+ line = "-e {}".format(ireq.link.url)
else:
line = str(ireq.req).lower()
diff --git a/tests/test_utils.py b/tests/test_utils.py
index 655ac768d..584e836eb 100644
--- a/tests/test_utils.py
+++ b/tests/test_utils.py
@@ -19,11 +19,23 @@ def test_format_requirement(from_line):
assert format_requirement(ireq) == "test==1.2"
-def test_format_requirement_editable(from_editable):
+def test_format_requirement_editable_vcs(from_editable):
ireq = from_editable("git+git://fake.org/x/y.git#egg=y")
assert format_requirement(ireq) == "-e git+git://fake.org/x/y.git#egg=y"
+def test_format_requirement_editable_vcs_with_password(from_editable):
+ ireq = from_editable("git+git://user:password@fake.org/x/y.git#egg=y")
+ assert (
+ format_requirement(ireq) == "-e git+git://user:password@fake.org/x/y.git#egg=y"
+ )
+
+
+def test_format_requirement_editable_local_path(from_editable):
+ ireq = from_editable("file:///home/user/package")
+ assert format_requirement(ireq) == "-e file:///home/user/package"
+
+
def test_format_requirement_ireq_with_hashes(from_line):
ireq = from_line("pytz==2017.2")
ireq_hashes = [
|
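The pip-tools diff above swaps `str(ireq.link)` for `ireq.link.url` in `format_requirement`, and the new test expects an embedded password to survive formatting. A hedged stand-in (not pip's actual `Link` class; the redaction rule below is an assumption) illustrates the difference the tests pin down:

```python
# Stand-in Link: str() hides credentials while .url keeps the raw URL, so
# writing "-e {}".format(link.url) round-trips an installable requirement.
class Link:
    def __init__(self, url: str) -> None:
        self.url = url

    def __str__(self) -> str:
        return self.url.replace(":password@", ":****@")  # assumed redaction


link = Link("git+git://user:password@fake.org/x/y.git#egg=y")
print("-e {}".format(link))      # -e git+git://user:****@fake.org/x/y.git#egg=y
print("-e {}".format(link.url))  # -e git+git://user:password@fake.org/x/y.git#egg=y
```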
zulip__zulip-5423 | Some of codeblock typeahead doesn't work
There are some codeblock typeaheads (from pygments) that don't work when I try them in Zulip, such as `c#`. I think `c#` breaks the codeblock syntax since there is a `#` in it. Btw, we could use `csharp` as an alternative to `c#`.
~~--And why there's a `pycon` typeahead, I think it's not a programming language?~~
Edit: `pycon` is an abbreviation of `python console`, so it's normal behaviour
I think we could solve this by removing them (the typeaheads that don't work) from the codeblock typeahead list (`tools/setup/lang.json`) and from the places that generate those typeaheads. (A regex sketch after the reproduction steps shows why `#` never matches.)

How to reproduce:
1. Type triple backticks (```) and then type `c`.
2. Some options will appear; select `c#`.
3. Type some code and send the message.
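To see why `#` breaks parsing, here is a condensed sketch of the language pattern in `FENCE_RE` from `zerver/lib/bugdown/fenced_code.py`; only the two character classes come from the repository (the second being the widened class the fix below adopts), the rest of the harness is illustrative.

```python
# Condensed (non-verbose) version of Zulip's fence regex: the old language
# class has no "#" or ".", so "```c#" is never recognized as an opening fence.
import re

OLD_FENCE = re.compile(r"^(~{3,}|`{3,})[ ]*(\{?\.?([a-zA-Z0-9_+-]*)\}?)[ ]*$")
NEW_FENCE = re.compile(r"^(~{3,}|`{3,})[ ]*(\{?\.?([a-zA-Z0-9_+-./#]*)\}?)[ ]*$")

print(OLD_FENCE.match("```c#"))           # None -> fence not recognized at all
print(NEW_FENCE.match("```c#").group(3))  # 'c#' -> language captured
```

With the old pattern the `` ```c# `` line falls through as plain text, which matches the broken rendering reported above.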
| [
{
"content": "\"\"\"\nFenced Code Extension for Python Markdown\n=========================================\n\nThis extension adds Fenced Code Blocks to Python-Markdown.\n\n >>> import markdown\n >>> text = '''\n ... A paragraph before a fenced code block:\n ...\n ... ~~~\n ... Fenced code block\n ... ~~~\n ... '''\n >>> html = markdown.markdown(text, extensions=['fenced_code'])\n >>> print html\n <p>A paragraph before a fenced code block:</p>\n <pre><code>Fenced code block\n </code></pre>\n\nWorks with safe_mode also (we check this because we are using the HtmlStash):\n\n >>> print markdown.markdown(text, extensions=['fenced_code'], safe_mode='replace')\n <p>A paragraph before a fenced code block:</p>\n <pre><code>Fenced code block\n </code></pre>\n\nInclude tilde's in a code block and wrap with blank lines:\n\n >>> text = '''\n ... ~~~~~~~~\n ...\n ... ~~~~\n ... ~~~~~~~~'''\n >>> print markdown.markdown(text, extensions=['fenced_code'])\n <pre><code>\n ~~~~\n </code></pre>\n\nRemoves trailing whitespace from code blocks that cause horizontal scrolling\n >>> import markdown\n >>> text = '''\n ... A paragraph before a fenced code block:\n ...\n ... ~~~\n ... Fenced code block \\t\\t\\t\\t\\t\\t\\t\n ... ~~~\n ... '''\n >>> html = markdown.markdown(text, extensions=['fenced_code'])\n >>> print html\n <p>A paragraph before a fenced code block:</p>\n <pre><code>Fenced code block\n </code></pre>\n\nLanguage tags:\n\n >>> text = '''\n ... ~~~~{.python}\n ... # Some python code\n ... ~~~~'''\n >>> print markdown.markdown(text, extensions=['fenced_code'])\n <pre><code class=\"python\"># Some python code\n </code></pre>\n\nCopyright 2007-2008 [Waylan Limberg](http://achinghead.com/).\n\nProject website: <http://packages.python.org/Markdown/extensions/fenced_code_blocks.html>\nContact: [email protected]\n\nLicense: BSD (see ../docs/LICENSE for details)\n\nDependencies:\n* [Python 2.4+](http://python.org)\n* [Markdown 2.0+](http://packages.python.org/Markdown/)\n* [Pygments (optional)](http://pygments.org)\n\n\"\"\"\n\nimport re\nimport subprocess\nimport markdown\nimport six\nfrom django.utils.html import escape\nfrom markdown.extensions.codehilite import CodeHilite, CodeHiliteExtension\nfrom zerver.lib.str_utils import force_bytes\nfrom zerver.lib.tex import render_tex\nfrom typing import Any, Dict, Iterable, List, MutableSequence, Optional, Tuple, Union, Text\n\n# Global vars\nFENCE_RE = re.compile(u\"\"\"\n # ~~~ or ```\n (?P<fence>\n ^(?:~{3,}|`{3,})\n )\n\n [ ]* # spaces\n\n (\n \\\\{?\\\\.?\n (?P<lang>\n [a-zA-Z0-9_+-]*\n ) # \"py\" or \"javascript\"\n \\\\}?\n ) # language, like \".py\" or \"{javascript}\"\n [ ]* # spaces\n $\n \"\"\", re.VERBOSE)\n\n\nCODE_WRAP = u'<pre><code%s>%s\\n</code></pre>'\nLANG_TAG = u' class=\"%s\"'\n\nclass FencedCodeExtension(markdown.Extension):\n\n def extendMarkdown(self, md, md_globals):\n # type: (markdown.Markdown, Dict[str, Any]) -> None\n \"\"\" Add FencedBlockPreprocessor to the Markdown instance. \"\"\"\n md.registerExtension(self)\n\n # Newer versions of Python-Markdown (starting at 2.3?) 
have\n # a normalize_whitespace preprocessor that needs to go first.\n position = ('>normalize_whitespace'\n if 'normalize_whitespace' in md.preprocessors\n else '_begin')\n\n md.preprocessors.add('fenced_code_block',\n FencedBlockPreprocessor(md),\n position)\n\n\nclass FencedBlockPreprocessor(markdown.preprocessors.Preprocessor):\n def __init__(self, md):\n # type: (markdown.Markdown) -> None\n markdown.preprocessors.Preprocessor.__init__(self, md)\n\n self.checked_for_codehilite = False\n self.codehilite_conf = {} # type: Dict[str, List[Any]]\n\n def run(self, lines):\n # type: (Iterable[Text]) -> List[Text]\n \"\"\" Match and store Fenced Code Blocks in the HtmlStash. \"\"\"\n\n output = [] # type: List[Text]\n\n class BaseHandler(object):\n def handle_line(self, line):\n # type: (Text) -> None\n raise NotImplementedError()\n\n def done(self):\n # type: () -> None\n raise NotImplementedError()\n\n processor = self\n handlers = [] # type: List[BaseHandler]\n\n def push(handler):\n # type: (BaseHandler) -> None\n handlers.append(handler)\n\n def pop():\n # type: () -> None\n handlers.pop()\n\n def check_for_new_fence(output, line):\n # type: (MutableSequence[Text], Text) -> None\n m = FENCE_RE.match(line)\n if m:\n fence = m.group('fence')\n lang = m.group('lang')\n handler = generic_handler(output, fence, lang)\n push(handler)\n else:\n output.append(line)\n\n class OuterHandler(BaseHandler):\n def __init__(self, output):\n # type: (MutableSequence[Text]) -> None\n self.output = output\n\n def handle_line(self, line):\n # type: (Text) -> None\n check_for_new_fence(self.output, line)\n\n def done(self):\n # type: () -> None\n pop()\n\n def generic_handler(output, fence, lang):\n # type: (MutableSequence[Text], Text, Text) -> BaseHandler\n if lang in ('quote', 'quoted'):\n return QuoteHandler(output, fence)\n elif lang in ('math', 'tex', 'latex'):\n return TexHandler(output, fence)\n else:\n return CodeHandler(output, fence, lang)\n\n class CodeHandler(BaseHandler):\n def __init__(self, output, fence, lang):\n # type: (MutableSequence[Text], Text, Text) -> None\n self.output = output\n self.fence = fence\n self.lang = lang\n self.lines = [] # type: List[Text]\n\n def handle_line(self, line):\n # type: (Text) -> None\n if line.rstrip() == self.fence:\n self.done()\n else:\n self.lines.append(line.rstrip())\n\n def done(self):\n # type: () -> None\n text = '\\n'.join(self.lines)\n text = processor.format_code(self.lang, text)\n text = processor.placeholder(text)\n processed_lines = text.split('\\n')\n self.output.append('')\n self.output.extend(processed_lines)\n self.output.append('')\n pop()\n\n class QuoteHandler(BaseHandler):\n def __init__(self, output, fence):\n # type: (MutableSequence[Text], Text) -> None\n self.output = output\n self.fence = fence\n self.lines = [] # type: List[Text]\n\n def handle_line(self, line):\n # type: (Text) -> None\n if line.rstrip() == self.fence:\n self.done()\n else:\n check_for_new_fence(self.lines, line)\n\n def done(self):\n # type: () -> None\n text = '\\n'.join(self.lines)\n text = processor.format_quote(text)\n processed_lines = text.split('\\n')\n self.output.append('')\n self.output.extend(processed_lines)\n self.output.append('')\n pop()\n\n class TexHandler(BaseHandler):\n def __init__(self, output, fence):\n # type: (MutableSequence[Text], Text) -> None\n self.output = output\n self.fence = fence\n self.lines = [] # type: List[Text]\n\n def handle_line(self, line):\n # type: (Text) -> None\n if line.rstrip() == self.fence:\n self.done()\n 
else:\n check_for_new_fence(self.lines, line)\n\n def done(self):\n # type: () -> None\n text = '\\n'.join(self.lines)\n text = processor.format_tex(text)\n text = processor.placeholder(text)\n processed_lines = text.split('\\n')\n self.output.append('')\n self.output.extend(processed_lines)\n self.output.append('')\n pop()\n\n handler = OuterHandler(output)\n push(handler)\n\n for line in lines:\n handlers[-1].handle_line(line)\n\n while handlers:\n handlers[-1].done()\n\n # This fiddly handling of new lines at the end of our output was done to make\n # existing tests pass. Bugdown is just kind of funny when it comes to new lines,\n # but we could probably remove this hack.\n if len(output) > 2 and output[-2] != '':\n output.append('')\n return output\n\n def format_code(self, lang, text):\n # type: (Text, Text) -> Text\n if lang:\n langclass = LANG_TAG % (lang,)\n else:\n langclass = ''\n\n # Check for code hilite extension\n if not self.checked_for_codehilite:\n for ext in self.markdown.registeredExtensions:\n if isinstance(ext, CodeHiliteExtension):\n self.codehilite_conf = ext.config\n break\n\n self.checked_for_codehilite = True\n\n # If config is not empty, then the codehighlite extension\n # is enabled, so we call it to highlite the code\n if self.codehilite_conf:\n highliter = CodeHilite(text,\n linenums=self.codehilite_conf['linenums'][0],\n guess_lang=self.codehilite_conf['guess_lang'][0],\n css_class=self.codehilite_conf['css_class'][0],\n style=self.codehilite_conf['pygments_style'][0],\n use_pygments=self.codehilite_conf['use_pygments'][0],\n lang=(lang or None),\n noclasses=self.codehilite_conf['noclasses'][0])\n\n code = highliter.hilite()\n else:\n code = CODE_WRAP % (langclass, self._escape(text))\n\n return code\n\n def format_quote(self, text):\n # type: (Text) -> Text\n paragraphs = text.split(\"\\n\\n\")\n quoted_paragraphs = []\n for paragraph in paragraphs:\n lines = paragraph.split(\"\\n\")\n quoted_paragraphs.append(\"\\n\".join(\"> \" + line for line in lines if line != ''))\n return \"\\n\\n\".join(quoted_paragraphs)\n\n def format_tex(self, text):\n # type: (Text) -> Text\n paragraphs = text.split(\"\\n\\n\")\n tex_paragraphs = []\n for paragraph in paragraphs:\n html = render_tex(paragraph, is_inline=False)\n if html is not None:\n tex_paragraphs.append(html)\n else:\n tex_paragraphs.append('<span class=\"tex-error\">' +\n escape(paragraph) + '</span>')\n return \"\\n\\n\".join(tex_paragraphs)\n\n def placeholder(self, code):\n # type: (Text) -> Text\n return self.markdown.htmlStash.store(code, safe=True)\n\n def _escape(self, txt):\n # type: (Text) -> Text\n \"\"\" basic html escaping \"\"\"\n txt = txt.replace('&', '&')\n txt = txt.replace('<', '<')\n txt = txt.replace('>', '>')\n txt = txt.replace('\"', '"')\n return txt\n\n\ndef makeExtension(*args, **kwargs):\n # type: (*Any, **Union[bool, None, Text]) -> FencedCodeExtension\n return FencedCodeExtension(*args, **kwargs)\n\nif __name__ == \"__main__\":\n import doctest\n doctest.testmod()\n",
"path": "zerver/lib/bugdown/fenced_code.py"
}
] | [
{
"content": "\"\"\"\nFenced Code Extension for Python Markdown\n=========================================\n\nThis extension adds Fenced Code Blocks to Python-Markdown.\n\n >>> import markdown\n >>> text = '''\n ... A paragraph before a fenced code block:\n ...\n ... ~~~\n ... Fenced code block\n ... ~~~\n ... '''\n >>> html = markdown.markdown(text, extensions=['fenced_code'])\n >>> print html\n <p>A paragraph before a fenced code block:</p>\n <pre><code>Fenced code block\n </code></pre>\n\nWorks with safe_mode also (we check this because we are using the HtmlStash):\n\n >>> print markdown.markdown(text, extensions=['fenced_code'], safe_mode='replace')\n <p>A paragraph before a fenced code block:</p>\n <pre><code>Fenced code block\n </code></pre>\n\nInclude tilde's in a code block and wrap with blank lines:\n\n >>> text = '''\n ... ~~~~~~~~\n ...\n ... ~~~~\n ... ~~~~~~~~'''\n >>> print markdown.markdown(text, extensions=['fenced_code'])\n <pre><code>\n ~~~~\n </code></pre>\n\nRemoves trailing whitespace from code blocks that cause horizontal scrolling\n >>> import markdown\n >>> text = '''\n ... A paragraph before a fenced code block:\n ...\n ... ~~~\n ... Fenced code block \\t\\t\\t\\t\\t\\t\\t\n ... ~~~\n ... '''\n >>> html = markdown.markdown(text, extensions=['fenced_code'])\n >>> print html\n <p>A paragraph before a fenced code block:</p>\n <pre><code>Fenced code block\n </code></pre>\n\nLanguage tags:\n\n >>> text = '''\n ... ~~~~{.python}\n ... # Some python code\n ... ~~~~'''\n >>> print markdown.markdown(text, extensions=['fenced_code'])\n <pre><code class=\"python\"># Some python code\n </code></pre>\n\nCopyright 2007-2008 [Waylan Limberg](http://achinghead.com/).\n\nProject website: <http://packages.python.org/Markdown/extensions/fenced_code_blocks.html>\nContact: [email protected]\n\nLicense: BSD (see ../docs/LICENSE for details)\n\nDependencies:\n* [Python 2.4+](http://python.org)\n* [Markdown 2.0+](http://packages.python.org/Markdown/)\n* [Pygments (optional)](http://pygments.org)\n\n\"\"\"\n\nimport re\nimport subprocess\nimport markdown\nimport six\nfrom django.utils.html import escape\nfrom markdown.extensions.codehilite import CodeHilite, CodeHiliteExtension\nfrom zerver.lib.str_utils import force_bytes\nfrom zerver.lib.tex import render_tex\nfrom typing import Any, Dict, Iterable, List, MutableSequence, Optional, Tuple, Union, Text\n\n# Global vars\nFENCE_RE = re.compile(u\"\"\"\n # ~~~ or ```\n (?P<fence>\n ^(?:~{3,}|`{3,})\n )\n\n [ ]* # spaces\n\n (\n \\\\{?\\\\.?\n (?P<lang>\n [a-zA-Z0-9_+-./#]*\n ) # \"py\" or \"javascript\"\n \\\\}?\n ) # language, like \".py\" or \"{javascript}\"\n [ ]* # spaces\n $\n \"\"\", re.VERBOSE)\n\n\nCODE_WRAP = u'<pre><code%s>%s\\n</code></pre>'\nLANG_TAG = u' class=\"%s\"'\n\nclass FencedCodeExtension(markdown.Extension):\n\n def extendMarkdown(self, md, md_globals):\n # type: (markdown.Markdown, Dict[str, Any]) -> None\n \"\"\" Add FencedBlockPreprocessor to the Markdown instance. \"\"\"\n md.registerExtension(self)\n\n # Newer versions of Python-Markdown (starting at 2.3?) 
have\n # a normalize_whitespace preprocessor that needs to go first.\n position = ('>normalize_whitespace'\n if 'normalize_whitespace' in md.preprocessors\n else '_begin')\n\n md.preprocessors.add('fenced_code_block',\n FencedBlockPreprocessor(md),\n position)\n\n\nclass FencedBlockPreprocessor(markdown.preprocessors.Preprocessor):\n def __init__(self, md):\n # type: (markdown.Markdown) -> None\n markdown.preprocessors.Preprocessor.__init__(self, md)\n\n self.checked_for_codehilite = False\n self.codehilite_conf = {} # type: Dict[str, List[Any]]\n\n def run(self, lines):\n # type: (Iterable[Text]) -> List[Text]\n \"\"\" Match and store Fenced Code Blocks in the HtmlStash. \"\"\"\n\n output = [] # type: List[Text]\n\n class BaseHandler(object):\n def handle_line(self, line):\n # type: (Text) -> None\n raise NotImplementedError()\n\n def done(self):\n # type: () -> None\n raise NotImplementedError()\n\n processor = self\n handlers = [] # type: List[BaseHandler]\n\n def push(handler):\n # type: (BaseHandler) -> None\n handlers.append(handler)\n\n def pop():\n # type: () -> None\n handlers.pop()\n\n def check_for_new_fence(output, line):\n # type: (MutableSequence[Text], Text) -> None\n m = FENCE_RE.match(line)\n if m:\n fence = m.group('fence')\n lang = m.group('lang')\n handler = generic_handler(output, fence, lang)\n push(handler)\n else:\n output.append(line)\n\n class OuterHandler(BaseHandler):\n def __init__(self, output):\n # type: (MutableSequence[Text]) -> None\n self.output = output\n\n def handle_line(self, line):\n # type: (Text) -> None\n check_for_new_fence(self.output, line)\n\n def done(self):\n # type: () -> None\n pop()\n\n def generic_handler(output, fence, lang):\n # type: (MutableSequence[Text], Text, Text) -> BaseHandler\n if lang in ('quote', 'quoted'):\n return QuoteHandler(output, fence)\n elif lang in ('math', 'tex', 'latex'):\n return TexHandler(output, fence)\n else:\n return CodeHandler(output, fence, lang)\n\n class CodeHandler(BaseHandler):\n def __init__(self, output, fence, lang):\n # type: (MutableSequence[Text], Text, Text) -> None\n self.output = output\n self.fence = fence\n self.lang = lang\n self.lines = [] # type: List[Text]\n\n def handle_line(self, line):\n # type: (Text) -> None\n if line.rstrip() == self.fence:\n self.done()\n else:\n self.lines.append(line.rstrip())\n\n def done(self):\n # type: () -> None\n text = '\\n'.join(self.lines)\n text = processor.format_code(self.lang, text)\n text = processor.placeholder(text)\n processed_lines = text.split('\\n')\n self.output.append('')\n self.output.extend(processed_lines)\n self.output.append('')\n pop()\n\n class QuoteHandler(BaseHandler):\n def __init__(self, output, fence):\n # type: (MutableSequence[Text], Text) -> None\n self.output = output\n self.fence = fence\n self.lines = [] # type: List[Text]\n\n def handle_line(self, line):\n # type: (Text) -> None\n if line.rstrip() == self.fence:\n self.done()\n else:\n check_for_new_fence(self.lines, line)\n\n def done(self):\n # type: () -> None\n text = '\\n'.join(self.lines)\n text = processor.format_quote(text)\n processed_lines = text.split('\\n')\n self.output.append('')\n self.output.extend(processed_lines)\n self.output.append('')\n pop()\n\n class TexHandler(BaseHandler):\n def __init__(self, output, fence):\n # type: (MutableSequence[Text], Text) -> None\n self.output = output\n self.fence = fence\n self.lines = [] # type: List[Text]\n\n def handle_line(self, line):\n # type: (Text) -> None\n if line.rstrip() == self.fence:\n self.done()\n 
else:\n check_for_new_fence(self.lines, line)\n\n def done(self):\n # type: () -> None\n text = '\\n'.join(self.lines)\n text = processor.format_tex(text)\n text = processor.placeholder(text)\n processed_lines = text.split('\\n')\n self.output.append('')\n self.output.extend(processed_lines)\n self.output.append('')\n pop()\n\n handler = OuterHandler(output)\n push(handler)\n\n for line in lines:\n handlers[-1].handle_line(line)\n\n while handlers:\n handlers[-1].done()\n\n # This fiddly handling of new lines at the end of our output was done to make\n # existing tests pass. Bugdown is just kind of funny when it comes to new lines,\n # but we could probably remove this hack.\n if len(output) > 2 and output[-2] != '':\n output.append('')\n return output\n\n def format_code(self, lang, text):\n # type: (Text, Text) -> Text\n if lang:\n langclass = LANG_TAG % (lang,)\n else:\n langclass = ''\n\n # Check for code hilite extension\n if not self.checked_for_codehilite:\n for ext in self.markdown.registeredExtensions:\n if isinstance(ext, CodeHiliteExtension):\n self.codehilite_conf = ext.config\n break\n\n self.checked_for_codehilite = True\n\n # If config is not empty, then the codehighlite extension\n # is enabled, so we call it to highlite the code\n if self.codehilite_conf:\n highliter = CodeHilite(text,\n linenums=self.codehilite_conf['linenums'][0],\n guess_lang=self.codehilite_conf['guess_lang'][0],\n css_class=self.codehilite_conf['css_class'][0],\n style=self.codehilite_conf['pygments_style'][0],\n use_pygments=self.codehilite_conf['use_pygments'][0],\n lang=(lang or None),\n noclasses=self.codehilite_conf['noclasses'][0])\n\n code = highliter.hilite()\n else:\n code = CODE_WRAP % (langclass, self._escape(text))\n\n return code\n\n def format_quote(self, text):\n # type: (Text) -> Text\n paragraphs = text.split(\"\\n\\n\")\n quoted_paragraphs = []\n for paragraph in paragraphs:\n lines = paragraph.split(\"\\n\")\n quoted_paragraphs.append(\"\\n\".join(\"> \" + line for line in lines if line != ''))\n return \"\\n\\n\".join(quoted_paragraphs)\n\n def format_tex(self, text):\n # type: (Text) -> Text\n paragraphs = text.split(\"\\n\\n\")\n tex_paragraphs = []\n for paragraph in paragraphs:\n html = render_tex(paragraph, is_inline=False)\n if html is not None:\n tex_paragraphs.append(html)\n else:\n tex_paragraphs.append('<span class=\"tex-error\">' +\n escape(paragraph) + '</span>')\n return \"\\n\\n\".join(tex_paragraphs)\n\n def placeholder(self, code):\n # type: (Text) -> Text\n return self.markdown.htmlStash.store(code, safe=True)\n\n def _escape(self, txt):\n # type: (Text) -> Text\n \"\"\" basic html escaping \"\"\"\n txt = txt.replace('&', '&')\n txt = txt.replace('<', '<')\n txt = txt.replace('>', '>')\n txt = txt.replace('\"', '"')\n return txt\n\n\ndef makeExtension(*args, **kwargs):\n # type: (*Any, **Union[bool, None, Text]) -> FencedCodeExtension\n return FencedCodeExtension(*args, **kwargs)\n\nif __name__ == \"__main__\":\n import doctest\n doctest.testmod()\n",
"path": "zerver/lib/bugdown/fenced_code.py"
}
] | diff --git a/frontend_tests/node_tests/markdown.js b/frontend_tests/node_tests/markdown.js
index 129992a838587..5afa1c5b3207e 100644
--- a/frontend_tests/node_tests/markdown.js
+++ b/frontend_tests/node_tests/markdown.js
@@ -87,6 +87,15 @@ var social = {
stream_data.add_sub('Denmark', denmark);
stream_data.add_sub('social', social);
+// Check the default behavior of fenced code blocks
+// works properly before markdown is initialized.
+(function test_fenced_block_defaults() {
+ var input = '\n```\nfenced code\n```\n\nand then after\n';
+ var expected = '\n\n<div class="codehilite"><pre><span></span>fenced code\n</pre></div>\n\n\n\nand then after\n\n';
+ var output = fenced_code.process_fenced_code(input);
+ assert.equal(output, expected);
+}());
+
var markdown = require('js/markdown.js');
markdown.initialize();
@@ -181,6 +190,10 @@ var bugdown_data = JSON.parse(fs.readFileSync(path.join(__dirname, '../../zerver
expected: '<div class="codehilite"><pre><span></span> fenced code trailing whitespace\n</pre></div>\n\n\n<p>and then after</p>'},
{input: '* a\n* list \n* here',
expected: '<ul>\n<li>a</li>\n<li>list </li>\n<li>here</li>\n</ul>'},
+ {input: '\n```c#\nfenced code special\n```\n\nand then after\n',
+ expected: '<div class="codehilite"><pre><span></span>fenced code special\n</pre></div>\n\n\n<p>and then after</p>'},
+ {input: '\n```vb.net\nfenced code dot\n```\n\nand then after\n',
+ expected: '<div class="codehilite"><pre><span></span>fenced code dot\n</pre></div>\n\n\n<p>and then after</p>'},
{input: 'Some text first\n* a\n* list \n* here\n\nand then after',
expected: '<p>Some text first</p>\n<ul>\n<li>a</li>\n<li>list </li>\n<li>here</li>\n</ul>\n<p>and then after</p>'},
{input: '1. an\n2. ordered \n3. list',
diff --git a/static/js/fenced_code.js b/static/js/fenced_code.js
index fbbab9e136220..d4228aaf67d81 100644
--- a/static/js/fenced_code.js
+++ b/static/js/fenced_code.js
@@ -9,13 +9,13 @@ var exports = {};
// auto-completing code blocks missing a trailing close.
// See backend fenced_code.py:71 for associated regexp
-var fencestr = "^(~{3,}|`{3,})" + // Opening Fence
- "[ ]*" + // Spaces
- "(" +
- "\\{?\\.?" +
- "([a-zA-Z0-9_+-]*)" + // Language
- "\\}?" +
- "[ ]*" + // Spaces
+var fencestr = "^(~{3,}|`{3,})" + // Opening Fence
+ "[ ]*" + // Spaces
+ "(" +
+ "\\{?\\.?" +
+ "([a-zA-Z0-9_+-./#]*)" + // Language
+ "\\}?" +
+ "[ ]*" + // Spaces
")$";
var fence_re = new RegExp(fencestr);
diff --git a/tools/test-js-with-node b/tools/test-js-with-node
index 33a7729575d8b..c59e009a6d1ef 100755
--- a/tools/test-js-with-node
+++ b/tools/test-js-with-node
@@ -29,6 +29,7 @@ enforce_fully_covered = {
'static/js/compose_ui.js',
'static/js/dict.js',
'static/js/filter.js',
+ 'static/js/fenced_code.js',
'static/js/hash_util.js',
'static/js/muting.js',
'static/js/people.js',
diff --git a/zerver/lib/bugdown/fenced_code.py b/zerver/lib/bugdown/fenced_code.py
index fb8a5eb3d0d5a..db4ca35525748 100644
--- a/zerver/lib/bugdown/fenced_code.py
+++ b/zerver/lib/bugdown/fenced_code.py
@@ -98,7 +98,7 @@
(
\\{?\\.?
(?P<lang>
- [a-zA-Z0-9_+-]*
+ [a-zA-Z0-9_+-./#]*
) # "py" or "javascript"
\\}?
) # language, like ".py" or "{javascript}"
diff --git a/zerver/tests/test_bugdown.py b/zerver/tests/test_bugdown.py
index 2f42b1494af13..861ccd2e6eaf9 100644
--- a/zerver/tests/test_bugdown.py
+++ b/zerver/tests/test_bugdown.py
@@ -110,10 +110,13 @@ def test_serial_code(self):
'hello()',
'```',
'',
- '``` .py',
+ '```vb.net',
'goodbye()',
'```',
'',
+ '```c#',
+ 'weirdchar()',
+ '```',
''
]
expected = [
@@ -122,8 +125,11 @@ def test_serial_code(self):
'',
'',
'',
- '**py:goodbye()**',
+ '**vb.net:goodbye()**',
+ '',
+ '',
'',
+ '**c#:weirdchar()**',
'',
''
]
|
cookiecutter__cookiecutter-1891 | 2.2.0 CLI reports version 2.1.2dev0
* Cookiecutter version: 2.2.0 (or 2.1.2dev0, depending on who you ask)
* Template project url: n/a
* Python version: 3.11
* Operating System: linux
### Description:
Get the accurate version of cookiecutter from the CLI
### What I've run:
```bash
cookiecutter --version
Cookiecutter 2.1.2.dev0 from $PREFIX/lib/python3.11/site-packages (Python 3.11.4 | packaged by conda-forge | (main, Jun 10 2023, 18:08:17) [GCC 12.2.0])
```
This would be a one-line fix, but ideally the version would always be sourced from exactly one place:
- `setup.py` and `importlib_metadata`
- `__init__.py`
- a `VERSION` file
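A minimal sketch of the first option, assuming the stdlib `importlib.metadata` (Python 3.8+; the `importlib_metadata` backport would be needed on 3.7). This is not cookiecutter's actual code, and the fallback string is illustrative:

```python
# Hypothetical cookiecutter/__init__.py: __version__ is derived from the
# installed distribution's metadata, so setup.py stays the single source.
from importlib.metadata import PackageNotFoundError, version

try:
    __version__ = version("cookiecutter")
except PackageNotFoundError:
    # Source checkout that was never installed; placeholder is illustrative.
    __version__ = "0.0.0+unknown"
```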
| [
{
"content": "\"\"\"cookiecutter distutils configuration.\"\"\"\nfrom setuptools import setup\n\nversion = \"2.2.2.dev0\"\n\nwith open('README.md', encoding='utf-8') as readme_file:\n readme = readme_file.read()\n\nrequirements = [\n 'binaryornot>=0.4.4',\n 'Jinja2>=2.7,<4.0.0',\n 'click>=7.0,<9.0.0',\n 'pyyaml>=5.3.1',\n 'python-slugify>=4.0.0',\n 'requests>=2.23.0',\n 'arrow',\n]\n\nsetup(\n name='cookiecutter',\n version=version,\n description=(\n 'A command-line utility that creates projects from project '\n 'templates, e.g. creating a Python package project from a '\n 'Python package project template.'\n ),\n long_description=readme,\n long_description_content_type='text/markdown',\n author='Audrey Feldroy',\n author_email='[email protected]',\n url='https://github.com/cookiecutter/cookiecutter',\n project_urls={\n \"Documentation\": \"https://cookiecutter.readthedocs.io\",\n \"Issues\": \"https://github.com/cookiecutter/cookiecutter/issues\",\n \"Discord\": \"https://discord.gg/9BrxzPKuEW\",\n },\n packages=['cookiecutter'],\n package_dir={'cookiecutter': 'cookiecutter'},\n entry_points={'console_scripts': ['cookiecutter = cookiecutter.__main__:main']},\n include_package_data=True,\n python_requires='>=3.7',\n install_requires=requirements,\n license='BSD',\n zip_safe=False,\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Console\",\n \"Intended Audience :: Developers\",\n \"Natural Language :: English\",\n \"License :: OSI Approved :: BSD License\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: 3.11\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n \"Programming Language :: Python\",\n \"Topic :: Software Development\",\n ],\n keywords=[\n \"cookiecutter\",\n \"Python\",\n \"projects\",\n \"project templates\",\n \"Jinja2\",\n \"skeleton\",\n \"scaffolding\",\n \"project directory\",\n \"package\",\n \"packaging\",\n ],\n)\n",
"path": "setup.py"
}
] | [
{
"content": "\"\"\"cookiecutter distutils configuration.\"\"\"\nfrom setuptools import setup\n\nversion = \"2.2.2\"\n\nwith open('README.md', encoding='utf-8') as readme_file:\n readme = readme_file.read()\n\nrequirements = [\n 'binaryornot>=0.4.4',\n 'Jinja2>=2.7,<4.0.0',\n 'click>=7.0,<9.0.0',\n 'pyyaml>=5.3.1',\n 'python-slugify>=4.0.0',\n 'requests>=2.23.0',\n 'arrow',\n]\n\nsetup(\n name='cookiecutter',\n version=version,\n description=(\n 'A command-line utility that creates projects from project '\n 'templates, e.g. creating a Python package project from a '\n 'Python package project template.'\n ),\n long_description=readme,\n long_description_content_type='text/markdown',\n author='Audrey Feldroy',\n author_email='[email protected]',\n url='https://github.com/cookiecutter/cookiecutter',\n project_urls={\n \"Documentation\": \"https://cookiecutter.readthedocs.io\",\n \"Issues\": \"https://github.com/cookiecutter/cookiecutter/issues\",\n \"Discord\": \"https://discord.gg/9BrxzPKuEW\",\n },\n packages=['cookiecutter'],\n package_dir={'cookiecutter': 'cookiecutter'},\n entry_points={'console_scripts': ['cookiecutter = cookiecutter.__main__:main']},\n include_package_data=True,\n python_requires='>=3.7',\n install_requires=requirements,\n license='BSD',\n zip_safe=False,\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Console\",\n \"Intended Audience :: Developers\",\n \"Natural Language :: English\",\n \"License :: OSI Approved :: BSD License\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: 3.11\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n \"Programming Language :: Python\",\n \"Topic :: Software Development\",\n ],\n keywords=[\n \"cookiecutter\",\n \"Python\",\n \"projects\",\n \"project templates\",\n \"Jinja2\",\n \"skeleton\",\n \"scaffolding\",\n \"project directory\",\n \"package\",\n \"packaging\",\n ],\n)\n",
"path": "setup.py"
}
] | diff --git a/HISTORY.md b/HISTORY.md
index 735443f91..9f646170a 100644
--- a/HISTORY.md
+++ b/HISTORY.md
@@ -2,6 +2,17 @@
History is important, but our current roadmap can be found [here](https://github.com/cookiecutter/cookiecutter/projects)
+## 2.2.2 (2023-07-10)
+
+### CI/CD and QA changes
+
+* Improve gitignore (#1889) @audreyfeldroy
+* Add warning for jinja2_time (#1890) @henryiii
+
+### This release is made by wonderful contributors:
+
+@audreyfeldroy, @ericof and @henryiii
+
## 2.2.0 (2023-07-06)
diff --git a/setup.py b/setup.py
index 25de62f15..a22733042 100644
--- a/setup.py
+++ b/setup.py
@@ -1,7 +1,7 @@
"""cookiecutter distutils configuration."""
from setuptools import setup
-version = "2.2.2.dev0"
+version = "2.2.2"
with open('README.md', encoding='utf-8') as readme_file:
readme = readme_file.read()
|
unionai-oss__pandera-1209 | Why python_requires <3.12?
In https://github.com/unionai-oss/pandera/commit/547aff1672fe455741f380c8bec1ed648074effc, `python_requires` was changed from `>=3.7` to `>=3.7,<=3.11`, and in a later commit, the upper bound was again changed to `<3.12`. This forces every downstream package or application to lower the upper bound from the typical default <4.0, which is unfortunate.
For example, with poetry, using the default `python = "^3.x"` version specification, pandera is now downgraded, or if one tries to force a newer version, version resolution fails:
```
> poetry update pandera
• Updating pandera (0.15.1 -> 0.14.5)
```
```
> poetry add pandera@0.15.1
The current project's Python requirement (>=3.9,<4.0) is not compatible with some of the required packages Python requirement:
- pandera requires Python >=3.7,<3.12, so it will not be satisfied for Python >=3.12,<4.0
Because my_package depends on pandera (0.15.1) which requires Python >=3.7,<3.12, version solving failed.
```
Is there a known issue with pandera on python 3.12? Otherwise, I recommend removing the constraint. While pandera might not be tested on 3.12 yet, it's common to assume the language will be backwards compatible as described in [PEP 387](https://peps.python.org/pep-0387/).
| [
{
"content": "from setuptools import find_packages, setup\n\nwith open(\"README.md\") as f:\n long_description = f.read()\n\nversion = {}\nwith open(\"pandera/version.py\") as fp:\n exec(fp.read(), version)\n\n_extras_require = {\n \"strategies\": [\"hypothesis >= 5.41.1\"],\n \"hypotheses\": [\"scipy\"],\n \"io\": [\"pyyaml >= 5.1\", \"black\", \"frictionless <= 4.40.8\"],\n \"pyspark\": [\"pyspark >= 3.2.0\"],\n \"modin\": [\"modin\", \"ray\", \"dask\"],\n \"modin-ray\": [\"modin\", \"ray\"],\n \"modin-dask\": [\"modin\", \"dask\"],\n \"dask\": [\"dask\"],\n \"mypy\": [\"pandas-stubs\"],\n \"fastapi\": [\"fastapi\"],\n \"geopandas\": [\"geopandas\", \"shapely\"],\n}\n\nextras_require = {\n **_extras_require,\n \"all\": list(set(x for y in _extras_require.values() for x in y)),\n}\n\nsetup(\n name=\"pandera\",\n version=version[\"__version__\"],\n author=\"Niels Bantilan\",\n author_email=\"[email protected]\",\n description=\"A light-weight and flexible data validation and testing tool for statistical data objects.\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/pandera-dev/pandera\",\n project_urls={\n \"Documentation\": \"https://pandera.readthedocs.io\",\n \"Issue Tracker\": \"https://github.com/pandera-dev/pandera/issues\",\n },\n keywords=[\"pandas\", \"validation\", \"data-structures\"],\n license=\"MIT\",\n data_files=[(\"\", [\"LICENSE.txt\"])],\n packages=find_packages(include=[\"pandera*\"]),\n package_data={\"pandera\": [\"py.typed\"]},\n install_requires=[\n \"multimethod\",\n \"numpy >= 1.19.0\",\n \"packaging >= 20.0\",\n \"pandas >= 1.2.0\",\n \"pydantic\",\n \"typeguard >= 3.0.2\",\n \"typing_extensions >= 3.7.4.3 ; python_version<'3.8'\",\n \"typing_inspect >= 0.6.0\",\n \"wrapt\",\n ],\n extras_require=extras_require,\n python_requires=\">=3.7,<3.12\",\n platforms=\"any\",\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Operating System :: OS Independent\",\n \"License :: OSI Approved :: MIT License\",\n \"Intended Audience :: Science/Research\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: 3.11\",\n \"Topic :: Scientific/Engineering\",\n ],\n)\n",
"path": "setup.py"
}
] | [
{
"content": "from setuptools import find_packages, setup\n\nwith open(\"README.md\") as f:\n long_description = f.read()\n\nversion = {}\nwith open(\"pandera/version.py\") as fp:\n exec(fp.read(), version)\n\n_extras_require = {\n \"strategies\": [\"hypothesis >= 5.41.1\"],\n \"hypotheses\": [\"scipy\"],\n \"io\": [\"pyyaml >= 5.1\", \"black\", \"frictionless <= 4.40.8\"],\n \"pyspark\": [\"pyspark >= 3.2.0\"],\n \"modin\": [\"modin\", \"ray\", \"dask\"],\n \"modin-ray\": [\"modin\", \"ray\"],\n \"modin-dask\": [\"modin\", \"dask\"],\n \"dask\": [\"dask\"],\n \"mypy\": [\"pandas-stubs\"],\n \"fastapi\": [\"fastapi\"],\n \"geopandas\": [\"geopandas\", \"shapely\"],\n}\n\nextras_require = {\n **_extras_require,\n \"all\": list(set(x for y in _extras_require.values() for x in y)),\n}\n\nsetup(\n name=\"pandera\",\n version=version[\"__version__\"],\n author=\"Niels Bantilan\",\n author_email=\"[email protected]\",\n description=\"A light-weight and flexible data validation and testing tool for statistical data objects.\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/pandera-dev/pandera\",\n project_urls={\n \"Documentation\": \"https://pandera.readthedocs.io\",\n \"Issue Tracker\": \"https://github.com/pandera-dev/pandera/issues\",\n },\n keywords=[\"pandas\", \"validation\", \"data-structures\"],\n license=\"MIT\",\n data_files=[(\"\", [\"LICENSE.txt\"])],\n packages=find_packages(include=[\"pandera*\"]),\n package_data={\"pandera\": [\"py.typed\"]},\n install_requires=[\n \"multimethod\",\n \"numpy >= 1.19.0\",\n \"packaging >= 20.0\",\n \"pandas >= 1.2.0\",\n \"pydantic\",\n \"typeguard >= 3.0.2\",\n \"typing_extensions >= 3.7.4.3 ; python_version<'3.8'\",\n \"typing_inspect >= 0.6.0\",\n \"wrapt\",\n ],\n extras_require=extras_require,\n python_requires=\">=3.7\",\n platforms=\"any\",\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Operating System :: OS Independent\",\n \"License :: OSI Approved :: MIT License\",\n \"Intended Audience :: Science/Research\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: 3.11\",\n \"Topic :: Scientific/Engineering\",\n ],\n)\n",
"path": "setup.py"
}
] | diff --git a/setup.py b/setup.py
index 3b134dddd..cf4317c8f 100644
--- a/setup.py
+++ b/setup.py
@@ -56,7 +56,7 @@
"wrapt",
],
extras_require=extras_require,
- python_requires=">=3.7,<3.12",
+ python_requires=">=3.7",
platforms="any",
classifiers=[
"Development Status :: 5 - Production/Stable",
|
bridgecrewio__checkov-4077 | Crash when running a scan against a plan output
**Describe the issue**
We have Checkov configured in a GitHub Actions workflow. Our configuration creates a Terraform plan, and then we run Checkov against that plan. This practice was working but recently started failing with a TypeError when scanning a plan.
**Examples**
```bash
terraform plan -no-color --out tfplan.binary
terraform show -no-color -json tfplan.binary > ${{ github.workspace }}/${{steps.get-plan-file-name.outputs.name}}
checkov -f ${{ github.workspace }}/${{steps.get-plan-file-name.outputs.name}} \
  --framework terraform_plan \
  --skip-check
```
**Exception Trace**
```
2022-12-14 16:00:44,343 [MainThread ] [DEBUG] Leveraging the bundled IAM Definition.
2022-12-14 16:00:44,343 [MainThread ] [DEBUG] Leveraging the IAM definition at /opt/hostedtoolcache/Python/3.10.8/x64/lib/python3.10/site-packages/policy_sentry/shared/data/iam-definition.json
2022-12-14 16:00:44,589 [MainThread ] [DEBUG] Adding the IntegrationFeatureRegistry <checkov.common.bridgecrew.integration_features.features.policy_metadata_integration.PolicyMetadataIntegration object at 0x7f188f6cffd0> with order 0
2022-12-14 16:00:44,589 [MainThread ] [DEBUG] self.features after the sort:
2022-12-14 16:00:44,589 [MainThread ] [DEBUG] [<checkov.common.bridgecrew.integration_features.features.policy_metadata_integration.PolicyMetadataIntegration object at 0x7f188f6cffd0>]
2022-12-14 16:00:44,590 [MainThread ] [DEBUG] Adding the IntegrationFeatureRegistry <checkov.common.bridgecrew.integration_features.features.suppressions_integration.SuppressionsIntegration object at 0x7f188cae24d0> with order 2
2022-12-14 16:00:44,590 [MainThread ] [DEBUG] self.features after the sort:
2022-12-14 16:00:44,590 [MainThread ] [DEBUG] [<checkov.common.bridgecrew.integration_features.features.policy_metadata_integration.PolicyMetadataIntegration object at 0x7f188f6cffd0>, <checkov.common.bridgecrew.integration_features.features.suppressions_integration.SuppressionsIntegration object at 0x7f188cae24d0>]
2022-12-14 16:00:44,591 [MainThread ] [DEBUG] Adding the IntegrationFeatureRegistry <checkov.common.bridgecrew.integration_features.features.licensing_integration.LicensingIntegration object at 0x7f188cae2950> with order 6
2022-12-14 16:00:44,591 [MainThread ] [DEBUG] self.features after the sort:
2022-12-14 16:00:44,591 [MainThread ] [DEBUG] [<checkov.common.bridgecrew.integration_features.features.policy_metadata_integration.PolicyMetadataIntegration object at 0x7f188f6cffd0>, <checkov.common.bridgecrew.integration_features.features.suppressions_integration.SuppressionsIntegration object at 0x7f188cae24d0>, <checkov.common.bridgecrew.integration_features.features.licensing_integration.LicensingIntegration object at 0x7f188cae2950>]
2022-12-14 16:00:44,591 [MainThread ] [DEBUG] Adding the IntegrationFeatureRegistry <checkov.common.bridgecrew.integration_features.features.repo_config_integration.RepoConfigIntegration object at 0x7f188cae2f20> with order 0
2022-12-14 16:00:44,591 [MainThread ] [DEBUG] self.features after the sort:
2022-12-14 16:00:44,591 [MainThread ] [DEBUG] [<checkov.common.bridgecrew.integration_features.features.policy_metadata_integration.PolicyMetadataIntegration object at 0x7f188f6cffd0>, <checkov.common.bridgecrew.integration_features.features.repo_config_integration.RepoConfigIntegration object at 0x7f188cae2f20>, <checkov.common.bridgecrew.integration_features.features.suppressions_integration.SuppressionsIntegration object at 0x7f188cae24d0>, <checkov.common.bridgecrew.integration_features.features.licensing_integration.LicensingIntegration object at 0x7f188cae2950>]
2022-12-14 16:00:44,592 [MainThread ] [DEBUG] Adding the IntegrationFeatureRegistry <checkov.common.bridgecrew.integration_features.features.fixes_integration.FixesIntegration object at 0x7f188cae3550> with order 10
2022-12-14 16:00:44,592 [MainThread ] [DEBUG] self.features after the sort:
2022-12-14 16:00:44,592 [MainThread ] [DEBUG] [<checkov.common.bridgecrew.integration_features.features.policy_metadata_integration.PolicyMetadataIntegration object at 0x7f188f6cffd0>, <checkov.common.bridgecrew.integration_features.features.repo_config_integration.RepoConfigIntegration object at 0x7f188cae2f20>, <checkov.common.bridgecrew.integration_features.features.suppressions_integration.SuppressionsIntegration object at 0x7f188cae24d0>, <checkov.common.bridgecrew.integration_features.features.licensing_integration.LicensingIntegration object at 0x7f188cae2950>, <checkov.common.bridgecrew.integration_features.features.fixes_integration.FixesIntegration object at 0x7f188cae3550>]
2022-12-14 16:00:44,593 [MainThread ] [DEBUG] Adding the IntegrationFeatureRegistry <checkov.common.bridgecrew.integration_features.features.custom_policies_integration.CustomPoliciesIntegration object at 0x7f188cae39d0> with order 1
2022-12-14 16:00:44,593 [MainThread ] [DEBUG] self.features after the sort:
2022-12-14 16:00:44,593 [MainThread ] [DEBUG] [<checkov.common.bridgecrew.integration_features.features.policy_metadata_integration.PolicyMetadataIntegration object at 0x7f188f6cffd0>, <checkov.common.bridgecrew.integration_features.features.repo_config_integration.RepoConfigIntegration object at 0x7f188cae2f20>, <checkov.common.bridgecrew.integration_features.features.custom_policies_integration.CustomPoliciesIntegration object at 0x7f188cae39d0>, <checkov.common.bridgecrew.integration_features.features.suppressions_integration.SuppressionsIntegration object at 0x7f188cae24d0>, <checkov.common.bridgecrew.integration_features.features.licensing_integration.LicensingIntegration object at 0x7f188cae2950>, <checkov.common.bridgecrew.integration_features.features.fixes_integration.FixesIntegration object at 0x7f188cae3550>]
2022-12-14 16:00:44,620 [MainThread ] [DEBUG] Loading external checks from /opt/hostedtoolcache/Python/3.10.8/x64/lib/python3.10/site-packages/checkov/bicep/checks/graph_checks
2022-12-14 16:00:44,749 [MainThread ] [DEBUG] Popen(['git', 'version'], cwd=/home/runner/work/aws-infra/aws-infra, universal_newlines=False, shell=None, istream=None)
2022-12-14 16:00:44,767 [MainThread ] [DEBUG] Popen(['git', 'version'], cwd=/home/runner/work/aws-infra/aws-infra, universal_newlines=False, shell=None, istream=None)
2022-12-14 16:00:45,180 [MainThread ] [DEBUG] No API key present; setting include_all_checkov_policies to True
2022-12-14 16:00:45,180 [MainThread ] [DEBUG] Run metadata: {
"checkov_version": "2.2.155",
"python_executable": "/opt/hostedtoolcache/Python/3.10.8/x64/bin/python",
"python_version": "3.10.8 (main, Oct 18 2022, 06:43:21) [GCC 9.4.0]",
"checkov_executable": "/opt/hostedtoolcache/Python/3.10.8/x64/bin/checkov",
"args": [
"Command Line Args: -f formatted_plan_tfplan_infrastructure_workspaces_app_qa1-common_1937.json --framework terraform_plan --skip-check CKV_AWS_18,CKV_AWS_19,CKV_AWS_21,CKV_AWS_26,CKV_AWS_30,CKV_AWS_31,CKV_AWS_65,CKV_AWS_144,CKV_AWS_145,CKV_AWS_166,CKV_AWS_186,CKV_AWS_191,CKV_AWS_224,CKV_AWS_233,CKV2_AWS_5,CKV2_AWS_6,CKV2_AWS_34,CKV_AWS_158",
"Defaults:",
" --branch: master",
" --download-external-modules:False",
" --external-modules-download-path:.external_modules",
" --evaluate-variables:True",
" --secrets-scan-file-type:[]",
" --block-list-secret-scan:[]",
" --summary-position:top",
""
]
}
2022-12-14 16:00:45,181 [MainThread ] [DEBUG] Resultant set of frameworks (removing skipped frameworks): terraform_plan
2022-12-14 16:00:45,182 [MainThread ] [DEBUG] terraform_plan_runner declares no system dependency checks required.
2022-12-14 16:00:45,182 [MainThread ] [DEBUG] No API key found. Scanning locally only.
2022-12-14 16:00:45,249 [MainThread ] [DEBUG] Got checkov mappings and guidelines from Bridgecrew platform
2022-12-14 16:00:45,249 [MainThread ] [DEBUG] Loading external checks from /opt/hostedtoolcache/Python/3.10.8/x64/lib/python3.10/site-packages/checkov/terraform/checks/graph_checks
2022-12-14 16:00:45,250 [MainThread ] [DEBUG] Searching through ['gcp', '__pycache__', 'aws', 'azure'] and ['__init__.py']
2022-12-14 16:00:45,250 [MainThread ] [DEBUG] Searching through [] and ['GCPContainerRegistryReposAreNotPubliclyAccessible.yaml', 'GCPComputeFirewallOverlyPermissiveToAllTraffic.yaml', 'GKEClustersAreNotUsingDefaultServiceAccount.yaml', 'ServiceAccountHasGCPmanagedKey.yaml', 'GCPAuditLogsConfiguredForAllServicesAndUsers.yaml', 'GCPKMSKeyRingsAreNotPubliclyAccessible.yaml', 'DisableAccessToSqlDBInstanceForRootUsersWithoutPassword.yaml', 'GCPProjectHasNoLegacyNetworks.yaml', 'GCPLogBucketsConfiguredUsingLock.yaml', 'GCPKMSCryptoKeysAreNotPubliclyAccessible.yaml', 'CloudFunctionSecureHTTPTrigger.yaml', 'GCRContainerVulnerabilityScanningEnabled.yaml']
2022-12-14 16:00:45,300 [MainThread ] [DEBUG] Searching through [] and ['__init__.cpython-310.pyc']
2022-12-14 16:00:45,300 [MainThread ] [DEBUG] Searching through [] and ['VPCHasFlowLog.yaml', 'S3BucketHasPublicAccessBlock.yaml', 'EBSAddedBackup.yaml', 'WAF2HasLogs.yaml', 'S3PublicACLRead.yaml', 'S3BucketLogging.yaml', 'AppLoadBalancerTLS12.yaml', 'EFSAddedBackup.yaml', 'Route53ZoneHasMatchingQueryLog.yaml', 'S3KMSEncryptedByDefault.yaml', 'VPCHasRestrictedSG.yaml', 'ALBRedirectsHTTPToHTTPS.yaml', 'S3BucketVersioning.yaml', 'Route53ARecordAttachedResource.yaml', 'AppSyncProtectedByWAF.yaml', 'Route53ZoneEnableDNSSECSigning.yaml', 'SubnetHasACL.yaml', 'RDSClusterHasBackupPlan.yaml', 'AutoScalingEnableOnDynamoDBTables.yaml', 'IAMGroupHasAtLeastOneUser.yaml', 'APIGWLoggingLevelsDefinedProperly.yaml', 'PostgresRDSHasQueryLoggingEnabled.yaml', 'IAMUserHasNoConsoleAccess.yaml', 'APIProtectedByWAF.yaml', 'S3BucketEncryption.yaml', 'VPCPeeringRouteTableOverlyPermissive.yaml', 'ALBProtectedByWAF.yaml', 'PostgresDBHasQueryLoggingEnabled.yaml', 'EC2InstanceHasIAMRoleAttached.yaml', 'S3PublicACLWrite.yaml', 'AWSNATGatewaysshouldbeutilized.yaml', 'SGAttachedToResource.yaml', 'AutoScallingEnabledELB.yaml', 'GuardDutyIsEnabled.yaml', 'HTTPNotSendingPasswords.yaml', 'CodecommitApprovalRulesAttached.yaml', 'S3NotAllowAccessToAllAuthenticatedUsers.yaml', 'CloudFrontHasCustomSSLCertificate.yaml', 'CloudFrontHasResponseHeadersPolicy.yaml', 'EIPAllocatedToVPCAttachedEC2.yaml', 'CloudtrailHasCloudwatch.yaml', 'IAMPolicyNotAllowFullIAMAccess.yaml', 'IAMUsersAreMembersAtLeastOneGroup.yaml', 'AWSSSMParameterShouldBeEncrypted.yaml', 'S3BucketReplicationConfiguration.yaml', 'AMRClustersNotOpenToInternet.yaml', 'EncryptedEBSVolumeOnlyConnectedToEC2s.yaml']
2022-12-14 16:00:45,458 [MainThread ] [DEBUG] Searching through [] and ['StorageLoggingIsEnabledForBlobService.yaml', 'SQLServerAuditingEnabled.yaml', 'AzureDataFactoriesEncryptedWithCustomerManagedKey.yaml', 'StorageContainerActivityLogsNotPublic.yaml', 'ApplicationGatewayEnablesWAF.yaml', 'VirtualMachinesUtilizingManagedDisks.yaml', 'AzureActiveDirectoryAdminIsConfigured.yaml', 'AzureNetworkInterfacePublicIPAddressId.yaml', 'AzureAntimalwareIsConfiguredWithAutoUpdatesForVMs.yaml', 'SQLServerAuditingRetention90Days.yaml', 'VAconfiguredToSendReportsToAdmins.yaml', 'MSQLenablesCustomerManagedKey.yaml', 'AzureUnattachedDisksAreEncrypted.yaml', 'VAconfiguredToSendReports.yaml', 'StorageCriticalDataEncryptedCMK.yaml', 'AccessToPostgreSQLFromAzureServicesIsDisabled.yaml', 'PGSQLenablesCustomerManagedKey.yaml', 'AzureMSSQLServerHasSecurityAlertPolicy.yaml', 'StorageLoggingIsEnabledForTableService.yaml', 'AzureStorageAccountsUseCustomerManagedKeyForEncryption.yaml', 'AzureSynapseWorkspacesHaveNoIPFirewallRulesAttached.yaml', 'DataExplorerEncryptionUsesCustomKey.yaml', 'CognitiveServicesCustomerManagedKey.yaml', 'VAsetPeriodicScansOnSQL.yaml', 'VAisEnabledInStorageAccount.yaml', 'VMHasBackUpMachine.yaml']
2022-12-14 16:00:45,525 [MainThread ] [DEBUG] Loading external checks from /opt/hostedtoolcache/Python/3.10.8/x64/lib/python3.10/site-packages/checkov/cloudformation/checks/graph_checks
2022-12-14 16:00:45,525 [MainThread ] [DEBUG] Searching through ['__pycache__'] and ['__init__.py']
2022-12-14 16:00:45,525 [MainThread ] [DEBUG] Searching through [] and ['__init__.cpython-310.pyc']
2022-12-14 16:00:45,525 [MainThread ] [DEBUG] Loading external checks from /opt/hostedtoolcache/Python/3.10.8/x64/lib/python3.10/site-packages/checkov/kubernetes/checks/graph_checks
2022-12-14 16:00:45,525 [MainThread ] [DEBUG] Loading external checks from /opt/hostedtoolcache/Python/3.10.8/x64/lib/python3.10/site-packages/checkov/bicep/checks/graph_checks
2022-12-14 16:00:45,525 [MainThread ] [DEBUG] Loading external checks from /opt/hostedtoolcache/Python/3.10.8/x64/lib/python3.10/site-packages/checkov/terraform_plan/checks/graph_checks
2022-12-14 16:00:45,529 [MainThread ] [DEBUG] Running without API key, so only open source runners will be enabled
2022-12-14 16:00:45,529 [MainThread ] [DEBUG] Filtered list of policies: []
2022-12-14 16:00:45,529 [MainThread ] [DEBUG] Received the following policy-level suppressions, that will be skipped from running: []
2022-12-14 16:00:45,529 [MainThread ] [DEBUG] Filtered runners based on file type(s). Result: ['terraform_plan']
2022-12-14 16:00:45,530 [MainThread ] [DEBUG] Checking if terraform_plan is valid for license
2022-12-14 16:00:45,530 [MainThread ] [DEBUG] Open source mode - the runner is {"en" if enabled else "dis"}abled
2022-12-14 16:00:45,530 [MainThread ] [DEBUG] [tf_plan] - Parsing file /home/runner/work/aws-infra/aws-infra/formatted_plan_tfplan_infrastructure_workspaces_app_qa1-common_1937.json
2022-12-14 16:00:45,742 [MainThread ] [DEBUG] [tf_plan] - Successfully parsed file /home/runner/work/aws-infra/aws-infra/formatted_plan_tfplan_infrastructure_workspaces_app_qa1-common_1937.json
Traceback (most recent call last):
File "/opt/hostedtoolcache/Python/3.10.8/x64/bin/checkov", line 9, in <module>
sys.exit(run())
File "/opt/hostedtoolcache/Python/3.10.8/x64/lib/python3.10/site-packages/checkov/main.py", line 399, in run
scan_reports = runner_registry.run(external_checks_dir=external_checks_dir, files=config.file,
File "/opt/hostedtoolcache/Python/3.10.8/x64/lib/python3.10/site-packages/checkov/common/runners/runner_registry.py", line 87, in run
self.runners[0].run(root_folder, external_checks_dir=external_checks_dir, files=files,
File "/opt/hostedtoolcache/Python/3.10.8/x64/lib/python3.10/site-packages/checkov/terraform/plan_runner.py", line 104, in run
censored_definitions = omit_secret_value_from_definitions(definitions=self.definitions,
File "/opt/hostedtoolcache/Python/3.10.8/x64/lib/python3.10/site-packages/checkov/common/util/secrets.py", line 193, in omit_secret_value_from_definitions
censored_value = omit_secret_value_from_line(secret, secret)
File "/opt/hostedtoolcache/Python/3.10.8/x64/lib/python3.10/site-packages/checkov/common/util/secrets.py", line 126, in omit_secret_value_from_line
secret_length = len(secret)
TypeError: object of type 'bool' has no len()
Error: Process completed with exit code 1.
```
**Environment:**
The issue reproduces with every combination of the following:
- OS: Ubuntu 20.04 and Ubuntu 22.04
- Checkov Version: 2.2.148 and 2.2.155
- Python Version: 3.10.8 and 3.11.1
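
For context, the crash is easy to reproduce in isolation: `len()` on a `bool` raises exactly this `TypeError`. Terraform plan resource attributes are not always strings (a value such as `true` arrives as a Python `bool`), so the censoring helper needs a type guard before measuring the secret, which is what the patch in the diff below adds. A minimal, self-contained sketch of the failure mode and the guard (`censor` here is illustrative, not checkov's actual function):

```python
def censor(secret, line_text):
    # Guard: skip falsy secrets AND non-string values. Without the
    # isinstance check, a boolean attribute from a Terraform plan
    # reaches len(secret) and raises "object of type 'bool' has no len()".
    if not secret or not isinstance(secret, str):
        return line_text
    exposed = len(secret) // 4  # expose only a short prefix of the secret
    return line_text.replace(secret, secret[:exposed] + "*" * (len(secret) - exposed))

print(censor(True, "deletion_protection = true"))             # passed through, no crash
print(censor("hunter2-secret", "password = hunter2-secret"))  # prefix kept, rest masked
```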
| [
{
"content": "from __future__ import annotations\n\nimport copy\nimport itertools\nimport json\nimport logging\nimport re\n\n# secret categories for use as constants\nfrom typing import Any, Dict, TYPE_CHECKING\n\nfrom checkov.common.models.enums import CheckCategories, CheckResult\n\nif TYPE_CHECKING:\n from checkov.common.checks.base_check import BaseCheck\n from checkov.common.typing import _CheckResult, ResourceAttributesToOmit\n from pycep.typing import ParameterAttributes, ResourceAttributes\n from checkov.common.parsers.node import DictNode\n\n\nAWS = 'aws'\nAZURE = 'azure'\nGCP = 'gcp'\nGENERAL = 'general'\nALL = 'all'\n\n# Taken from various git-secrets forks that add Azure and GCP support to base AWS.\n# The groups here are the result of running git secrets --register-[aws|azure|gcp]\n# https://github.com/awslabs/git-secrets\n# https://github.com/deshpandetanmay/git-secrets\n# https://github.com/msalemcode/git-secrets#options-for-register-azure\n_secrets_regexes = {\n 'azure': [\n \"(\\\"|')?([0-9A-Fa-f]{4}-){4}[0-9A-Fa-f]{12}(\\\"|')?\", # client_secret\n \"(\\\"|')?[0-9A-Fa-f]{8}-([0-9A-Fa-f]{4}-){3}[0-9A-Fa-f]{12}(\\\"|')?\", # client_id and many other forms of IDs\n \"(\\\"|')?.*[0-9a-zA-Z]{2,256}[.][o|O][n|N][m|M][i|I][c|C][r|R][o|O][s|S][o|O][f|F][t|T][.][c|C][o|O][m|M](\\\"|')?\",\n \"(\\\"|')?.*[0-9a-zA-Z]{2,256}[.][b|B][l|L][o|O][b|B][.][c|C][o|O][r|R][e|E][.][w|W][i|I][n|N][d|D][o|O][w|W][s|S][.][n|N][e|E][t|T](\\\"|')?\",\n \"(\\\"|')?.*[0-9a-zA-Z]{2,256}[.][q|Q][u|U][e|E][u|U][e|E][.][c|C][o|O][r|R][e|E][.][w|W][i|I][n|N][d|D][o|O][w|W][s|S][.][n|N][e|E][t|T](\\\"|')?\",\n \"(\\\"|')?.*[0-9a-zA-Z]{2,256}[.][t|T][a|A][b|B][l|L][e|E][.][c|C][o|O][r|R][e|E][.][w|W][i|I][n|N][d|D][o|O][w|W][s|S][.][n|N][e|E][t|T](\\\"|')?\",\n \"(\\\"|')?.*[0-9a-zA-Z]{2,256}[.][d|D][a|A][t|T][a|A][b|B][a|A][s|S][e|E][.][w|W][i|I][n|N][d|D][o|O][w|W][s|S][.][n|N][e|E][t|T](\\\"|')?\",\n \"(\\\"|')?.*[0-9a-zA-Z]{2,256}[.][s|S][e|E][r|R][v|V][i|I][c|C][e|E][b|B][u|U][s|S][.][w|W][i|I][n|N][d|D][o|O][w|W][s|S][.][n|N][e|E][t|T](\\\"|')?\",\n \"(\\\"|')?.*[0-9a-zA-Z]{2,256}[.][t|T][i|I][m|M][e|E][s|S][e|E][r|R][i|I][e|E][s|S][.][a|A][z|Z][u|U][r|R][e|E][.][c|C][o|O][m|M](\\\"|')?\",\n \"(\\\"|')?.*[0-9a-zA-Z]{2,256}[.][a|T][c|C][c|C][e|E][s|S][s|S][c|C][o|O][n|N][t|T][r|R][o|O][l|L][.][w|W][i|I][n|N][d|D][o|O][w|W][s|S][.][n|N][e|E][t|T](\\\"|')?\",\n \"(\\\"|')?.*[0-9a-zA-Z]{2,256}[.][a|A][z|Z][u|U][r|R][e|E][h|H][d|D][i|I][n|N][s|S][i|I][g|G][h|H][t|T][.][n|N][e|E][t|T](\\\"|')?\",\n \"(\\\"|')?.*[0-9a-zA-Z]{2,256}[.][c|C][l|L][o|O][u|U][d|D][a|A][p|P][p|P][.][a|A][z|Z][u|U][r|R][e|E][.][c|C][o|O][m|M](\\\"|')?\",\n \"(\\\"|')?.*[0-9a-zA-Z]{2,256}[.][c|C][l|L][o|O][u|U][d|D][a|A][p|P][p|P][.][n|N][e|E][t|T](\\\"|')?\",\n \"(\\\"|')?.*[0-9a-zA-Z]{2,256}[.][d|D][o|O][c|C][u|U][m|M][e|E][n|N][t|T][s|S][.][a|A][z|Z][u|U][r|R][e|E][.][c|C][o|O][m|M](\\\"|')?\",\n ],\n\n 'aws': [\n \"(?<![A-Za-z0-9/+=])[A-Za-z0-9/+=]{40}(?![A-Za-z0-9/+=])\", # AWS secret access key\n \"(A3T[A-Z0-9]|AKIA|AGPA|AIDA|AROA|AIPA|ANPA|ANVA|ASIA)[A-Z0-9]{16}\", # AWS access key ID\n \"(\\\"|')?(AWS|aws|Aws)?_?(SECRET|secret|Secret)?_?(ACCESS|access|Access)?_?(KEY|key|Key)(\\\"|')?\\\\s*(:|=>|=)\\\\s*(\\\"|')?[A-Za-z0-9/\\\\+=]{40}(\\\"|')?\",\n \"(\\\"|')?(AWS|aws|Aws)?_?(ACCOUNT|account|Account)_?(ID|id|Id)?(\\\"|')?\\\\s*(:|=>|=)\\\\s*(\\\"|')?[0-9]{4}\\\\-?[0-9]{4}\\\\-?[0-9]{4}(\\\"|')?\"\n ],\n\n 'gcp': [\n \"\\bprivate_key.*\\b\"\n ],\n\n 'general': [\n \"^-----BEGIN (RSA|EC|DSA|GPP) PRIVATE KEY-----$\",\n ]\n}\n\n# 
first compile each unique regex while maintaining the mapping\n_patterns = {k: [re.compile(p, re.DOTALL) for p in v] for k, v in _secrets_regexes.items()}\n\n# now combine all the compiled patterns into one long list\n_patterns['all'] = list(itertools.chain.from_iterable(_patterns.values()))\n\n_hash_patterns = list(map(lambda regex: re.compile(regex, re.IGNORECASE), ['^[a-f0-9]{32}$', '^[a-f0-9]{40}$']))\n\n\ndef is_hash(s: str) -> bool:\n \"\"\"\n Checks whether a string is a MD5 or SHA1 hash\n\n :param s:\n :return:\n \"\"\"\n return any(pattern.search(s) for pattern in _hash_patterns)\n\n\ndef string_has_secrets(s: str, *categories: str) -> bool:\n \"\"\"\n Check whether the specified string has any matches for the regexes in the specified category(ies).\n\n If categories is blank, then this method checks all categories. It is recommended to use the category constants\n provided.\n\n Examples:\n string_has_secrets(some_string) -> checks all regexes\n string_has_secrets(some_string, AWS, GENERAL) -> checks only AWS and general regexes.\n\n :param s:\n :param categories:\n :return:\n \"\"\"\n\n # set a default if no category is provided; or, if categories were provided and they include 'all', then just set it\n # explicitly so we don't do any duplication\n if not categories or \"all\" in categories:\n categories = (\"all\",)\n\n if is_hash(s):\n return False\n\n for c in categories:\n if any([pattern.search(s) for pattern in _patterns[c]]):\n return True\n return False\n\n\ndef omit_multiple_secret_values_from_line(secrets: set[str], line_text: str) -> str:\n censored_line = line_text\n for secret in secrets:\n censored_line = omit_secret_value_from_line(secret, censored_line)\n return censored_line\n\n\ndef omit_secret_value_from_line(secret: str | None, line_text: str) -> str:\n if not secret:\n return line_text\n\n secret_length = len(secret)\n secret_len_to_expose = secret_length // 4 if secret_length < 100 else secret_length // 10\n\n try:\n secret_index = line_text.index(secret)\n except ValueError:\n try:\n secret_index = line_text.index(json.dumps(secret))\n except ValueError:\n return line_text\n\n censored_line = f'{line_text[:secret_index + secret_len_to_expose]}' \\\n f'{\"*\" * (secret_length - secret_len_to_expose)}' \\\n f'{line_text[secret_index + secret_length:]}'\n return censored_line\n\n\ndef omit_secret_value_from_checks(check: BaseCheck, check_result: dict[str, CheckResult] | _CheckResult,\n entity_code_lines: list[tuple[int, str]],\n entity_config: dict[str, Any] | ParameterAttributes | ResourceAttributes,\n resource_attributes_to_omit: ResourceAttributesToOmit | None = None) -> \\\n list[tuple[int, str]]:\n secrets = set() # a set, to efficiently avoid duplicates in case the same secret is found in the following conditions\n censored_code_lines = []\n\n if CheckCategories.SECRETS in check.categories and check_result.get('result') == CheckResult.FAILED:\n secrets.update([str(secret) for key, secret in entity_config.items() if\n key.startswith(f'{check.id}_secret')])\n\n if resource_attributes_to_omit and check.entity_type in resource_attributes_to_omit:\n for attribute_to_omit in [attr for attr in resource_attributes_to_omit.get(check.entity_type) if attr in entity_config]: # type:ignore[union-attr]\n secret = entity_config.get(attribute_to_omit)\n if isinstance(secret, list) and secret:\n secrets.add(secret[0])\n\n if not secrets:\n logging.debug(f\"Secret was not saved in {check.id}, can't omit\")\n return entity_code_lines\n\n for idx, line in 
entity_code_lines:\n censored_line = omit_multiple_secret_values_from_line(secrets, line)\n censored_code_lines.append((idx, censored_line))\n\n return censored_code_lines\n\n\ndef omit_secret_value_from_definitions(definitions: Dict[str, DictNode],\n resource_attributes_to_omit: ResourceAttributesToOmit) -> Dict[str, DictNode]:\n \"\"\"\n Mask secret values from definitions, as a way to mask these values in the created graph.\n Should be used only in runners that have the resource_attributes_to_omit mapping\n \"\"\"\n found_secrets = False\n censored_definitions = definitions\n for file, definition in definitions.items():\n for i, resource in enumerate(definition.get('resource', [])):\n for resource_type in [r_type for r_type in resource if r_type in resource_attributes_to_omit]:\n for resource_name, resource_config in resource[resource_type].items():\n for attribute in [attribute for attribute in resource_config if\n attribute in resource_attributes_to_omit[resource_type]]:\n if not found_secrets:\n found_secrets = True\n # The values in self.definitions shouldn't be changed so that checks' results\n # of checks that rely on the definitions values are not affected.\n # Hence, if secrets are found, we should censor them in a deep copy of self.definitions\n censored_definitions = copy.deepcopy(definitions)\n secret = resource_config[attribute][0]\n censored_value = omit_secret_value_from_line(secret, secret)\n censored_definitions[file]['resource'][i][resource_type][resource_name][attribute] = \\\n [censored_value]\n return censored_definitions\n\n\ndef get_secrets_from_string(s: str, *categories: str) -> list[str]:\n # set a default if no category is provided; or, if categories were provided and they include 'all', then just set it\n # explicitly so we don't do any duplication\n if not categories or \"all\" in categories:\n categories = (\"all\",)\n\n if is_hash(s):\n return list()\n\n secrets: list[str] = []\n for c in categories:\n matches: list[str] = []\n for pattern in _patterns[c]:\n _matches = re.finditer(pattern, s)\n matches.extend([str(match.group()) for match in _matches])\n if matches:\n secrets.extend(matches)\n return secrets\n",
"path": "checkov/common/util/secrets.py"
}
] | [
{
"content": "from __future__ import annotations\n\nimport copy\nimport itertools\nimport json\nimport logging\nimport re\n\n# secret categories for use as constants\nfrom typing import Any, Dict, TYPE_CHECKING\n\nfrom checkov.common.models.enums import CheckCategories, CheckResult\n\nif TYPE_CHECKING:\n from checkov.common.checks.base_check import BaseCheck\n from checkov.common.typing import _CheckResult, ResourceAttributesToOmit\n from pycep.typing import ParameterAttributes, ResourceAttributes\n from checkov.common.parsers.node import DictNode\n\n\nAWS = 'aws'\nAZURE = 'azure'\nGCP = 'gcp'\nGENERAL = 'general'\nALL = 'all'\n\n# Taken from various git-secrets forks that add Azure and GCP support to base AWS.\n# The groups here are the result of running git secrets --register-[aws|azure|gcp]\n# https://github.com/awslabs/git-secrets\n# https://github.com/deshpandetanmay/git-secrets\n# https://github.com/msalemcode/git-secrets#options-for-register-azure\n_secrets_regexes = {\n 'azure': [\n \"(\\\"|')?([0-9A-Fa-f]{4}-){4}[0-9A-Fa-f]{12}(\\\"|')?\", # client_secret\n \"(\\\"|')?[0-9A-Fa-f]{8}-([0-9A-Fa-f]{4}-){3}[0-9A-Fa-f]{12}(\\\"|')?\", # client_id and many other forms of IDs\n \"(\\\"|')?.*[0-9a-zA-Z]{2,256}[.][o|O][n|N][m|M][i|I][c|C][r|R][o|O][s|S][o|O][f|F][t|T][.][c|C][o|O][m|M](\\\"|')?\",\n \"(\\\"|')?.*[0-9a-zA-Z]{2,256}[.][b|B][l|L][o|O][b|B][.][c|C][o|O][r|R][e|E][.][w|W][i|I][n|N][d|D][o|O][w|W][s|S][.][n|N][e|E][t|T](\\\"|')?\",\n \"(\\\"|')?.*[0-9a-zA-Z]{2,256}[.][q|Q][u|U][e|E][u|U][e|E][.][c|C][o|O][r|R][e|E][.][w|W][i|I][n|N][d|D][o|O][w|W][s|S][.][n|N][e|E][t|T](\\\"|')?\",\n \"(\\\"|')?.*[0-9a-zA-Z]{2,256}[.][t|T][a|A][b|B][l|L][e|E][.][c|C][o|O][r|R][e|E][.][w|W][i|I][n|N][d|D][o|O][w|W][s|S][.][n|N][e|E][t|T](\\\"|')?\",\n \"(\\\"|')?.*[0-9a-zA-Z]{2,256}[.][d|D][a|A][t|T][a|A][b|B][a|A][s|S][e|E][.][w|W][i|I][n|N][d|D][o|O][w|W][s|S][.][n|N][e|E][t|T](\\\"|')?\",\n \"(\\\"|')?.*[0-9a-zA-Z]{2,256}[.][s|S][e|E][r|R][v|V][i|I][c|C][e|E][b|B][u|U][s|S][.][w|W][i|I][n|N][d|D][o|O][w|W][s|S][.][n|N][e|E][t|T](\\\"|')?\",\n \"(\\\"|')?.*[0-9a-zA-Z]{2,256}[.][t|T][i|I][m|M][e|E][s|S][e|E][r|R][i|I][e|E][s|S][.][a|A][z|Z][u|U][r|R][e|E][.][c|C][o|O][m|M](\\\"|')?\",\n \"(\\\"|')?.*[0-9a-zA-Z]{2,256}[.][a|T][c|C][c|C][e|E][s|S][s|S][c|C][o|O][n|N][t|T][r|R][o|O][l|L][.][w|W][i|I][n|N][d|D][o|O][w|W][s|S][.][n|N][e|E][t|T](\\\"|')?\",\n \"(\\\"|')?.*[0-9a-zA-Z]{2,256}[.][a|A][z|Z][u|U][r|R][e|E][h|H][d|D][i|I][n|N][s|S][i|I][g|G][h|H][t|T][.][n|N][e|E][t|T](\\\"|')?\",\n \"(\\\"|')?.*[0-9a-zA-Z]{2,256}[.][c|C][l|L][o|O][u|U][d|D][a|A][p|P][p|P][.][a|A][z|Z][u|U][r|R][e|E][.][c|C][o|O][m|M](\\\"|')?\",\n \"(\\\"|')?.*[0-9a-zA-Z]{2,256}[.][c|C][l|L][o|O][u|U][d|D][a|A][p|P][p|P][.][n|N][e|E][t|T](\\\"|')?\",\n \"(\\\"|')?.*[0-9a-zA-Z]{2,256}[.][d|D][o|O][c|C][u|U][m|M][e|E][n|N][t|T][s|S][.][a|A][z|Z][u|U][r|R][e|E][.][c|C][o|O][m|M](\\\"|')?\",\n ],\n\n 'aws': [\n \"(?<![A-Za-z0-9/+=])[A-Za-z0-9/+=]{40}(?![A-Za-z0-9/+=])\", # AWS secret access key\n \"(A3T[A-Z0-9]|AKIA|AGPA|AIDA|AROA|AIPA|ANPA|ANVA|ASIA)[A-Z0-9]{16}\", # AWS access key ID\n \"(\\\"|')?(AWS|aws|Aws)?_?(SECRET|secret|Secret)?_?(ACCESS|access|Access)?_?(KEY|key|Key)(\\\"|')?\\\\s*(:|=>|=)\\\\s*(\\\"|')?[A-Za-z0-9/\\\\+=]{40}(\\\"|')?\",\n \"(\\\"|')?(AWS|aws|Aws)?_?(ACCOUNT|account|Account)_?(ID|id|Id)?(\\\"|')?\\\\s*(:|=>|=)\\\\s*(\\\"|')?[0-9]{4}\\\\-?[0-9]{4}\\\\-?[0-9]{4}(\\\"|')?\"\n ],\n\n 'gcp': [\n \"\\bprivate_key.*\\b\"\n ],\n\n 'general': [\n \"^-----BEGIN (RSA|EC|DSA|GPP) PRIVATE KEY-----$\",\n ]\n}\n\n# 
first compile each unique regex while maintaining the mapping\n_patterns = {k: [re.compile(p, re.DOTALL) for p in v] for k, v in _secrets_regexes.items()}\n\n# now combine all the compiled patterns into one long list\n_patterns['all'] = list(itertools.chain.from_iterable(_patterns.values()))\n\n_hash_patterns = list(map(lambda regex: re.compile(regex, re.IGNORECASE), ['^[a-f0-9]{32}$', '^[a-f0-9]{40}$']))\n\n\ndef is_hash(s: str) -> bool:\n \"\"\"\n Checks whether a string is a MD5 or SHA1 hash\n\n :param s:\n :return:\n \"\"\"\n return any(pattern.search(s) for pattern in _hash_patterns)\n\n\ndef string_has_secrets(s: str, *categories: str) -> bool:\n \"\"\"\n Check whether the specified string has any matches for the regexes in the specified category(ies).\n\n If categories is blank, then this method checks all categories. It is recommended to use the category constants\n provided.\n\n Examples:\n string_has_secrets(some_string) -> checks all regexes\n string_has_secrets(some_string, AWS, GENERAL) -> checks only AWS and general regexes.\n\n :param s:\n :param categories:\n :return:\n \"\"\"\n\n # set a default if no category is provided; or, if categories were provided and they include 'all', then just set it\n # explicitly so we don't do any duplication\n if not categories or \"all\" in categories:\n categories = (\"all\",)\n\n if is_hash(s):\n return False\n\n for c in categories:\n if any([pattern.search(s) for pattern in _patterns[c]]):\n return True\n return False\n\n\ndef omit_multiple_secret_values_from_line(secrets: set[str], line_text: str) -> str:\n censored_line = line_text\n for secret in secrets:\n censored_line = omit_secret_value_from_line(secret, censored_line)\n return censored_line\n\n\ndef omit_secret_value_from_line(secret: str | None, line_text: str) -> str:\n if not secret or not isinstance(secret, str):\n return line_text\n\n secret_length = len(secret)\n secret_len_to_expose = secret_length // 4 if secret_length < 100 else secret_length // 10\n\n try:\n secret_index = line_text.index(secret)\n except ValueError:\n try:\n secret_index = line_text.index(json.dumps(secret))\n except ValueError:\n return line_text\n\n censored_line = f'{line_text[:secret_index + secret_len_to_expose]}' \\\n f'{\"*\" * (secret_length - secret_len_to_expose)}' \\\n f'{line_text[secret_index + secret_length:]}'\n return censored_line\n\n\ndef omit_secret_value_from_checks(check: BaseCheck, check_result: dict[str, CheckResult] | _CheckResult,\n entity_code_lines: list[tuple[int, str]],\n entity_config: dict[str, Any] | ParameterAttributes | ResourceAttributes,\n resource_attributes_to_omit: ResourceAttributesToOmit | None = None) -> \\\n list[tuple[int, str]]:\n secrets = set() # a set, to efficiently avoid duplicates in case the same secret is found in the following conditions\n censored_code_lines = []\n\n if CheckCategories.SECRETS in check.categories and check_result.get('result') == CheckResult.FAILED:\n secrets.update([str(secret) for key, secret in entity_config.items() if\n key.startswith(f'{check.id}_secret')])\n\n if resource_attributes_to_omit and check.entity_type in resource_attributes_to_omit:\n for attribute_to_omit in [attr for attr in resource_attributes_to_omit.get(check.entity_type) if attr in entity_config]: # type:ignore[union-attr]\n secret = entity_config.get(attribute_to_omit)\n if isinstance(secret, list) and secret:\n secrets.add(secret[0])\n\n if not secrets:\n logging.debug(f\"Secret was not saved in {check.id}, can't omit\")\n return entity_code_lines\n\n for 
idx, line in entity_code_lines:\n censored_line = omit_multiple_secret_values_from_line(secrets, line)\n censored_code_lines.append((idx, censored_line))\n\n return censored_code_lines\n\n\ndef omit_secret_value_from_definitions(definitions: Dict[str, DictNode],\n resource_attributes_to_omit: ResourceAttributesToOmit) -> Dict[str, DictNode]:\n \"\"\"\n Mask secret values from definitions, as a way to mask these values in the created graph.\n Should be used only in runners that have the resource_attributes_to_omit mapping\n \"\"\"\n found_secrets = False\n censored_definitions = definitions\n for file, definition in definitions.items():\n for i, resource in enumerate(definition.get('resource', [])):\n for resource_type in [r_type for r_type in resource if r_type in resource_attributes_to_omit]:\n for resource_name, resource_config in resource[resource_type].items():\n for attribute in [attribute for attribute in resource_config if\n attribute in resource_attributes_to_omit[resource_type]]:\n if not found_secrets:\n found_secrets = True\n # The values in self.definitions shouldn't be changed so that checks' results\n # of checks that rely on the definitions values are not affected.\n # Hence, if secrets are found, we should censor them in a deep copy of self.definitions\n censored_definitions = copy.deepcopy(definitions)\n secret = resource_config[attribute][0]\n censored_value = omit_secret_value_from_line(secret, secret)\n censored_definitions[file]['resource'][i][resource_type][resource_name][attribute] = \\\n [censored_value]\n return censored_definitions\n\n\ndef get_secrets_from_string(s: str, *categories: str) -> list[str]:\n # set a default if no category is provided; or, if categories were provided and they include 'all', then just set it\n # explicitly so we don't do any duplication\n if not categories or \"all\" in categories:\n categories = (\"all\",)\n\n if is_hash(s):\n return list()\n\n secrets: list[str] = []\n for c in categories:\n matches: list[str] = []\n for pattern in _patterns[c]:\n _matches = re.finditer(pattern, s)\n matches.extend([str(match.group()) for match in _matches])\n if matches:\n secrets.extend(matches)\n return secrets\n",
"path": "checkov/common/util/secrets.py"
}
] | diff --git a/checkov/common/util/secrets.py b/checkov/common/util/secrets.py
index 0cf3bf1fc5..b0642fbd3a 100644
--- a/checkov/common/util/secrets.py
+++ b/checkov/common/util/secrets.py
@@ -120,7 +120,7 @@ def omit_multiple_secret_values_from_line(secrets: set[str], line_text: str) ->
def omit_secret_value_from_line(secret: str | None, line_text: str) -> str:
- if not secret:
+ if not secret or not isinstance(secret, str):
return line_text
secret_length = len(secret)
diff --git a/tests/unit/test_secrets.py b/tests/unit/test_secrets.py
index c42df50a03..cfc535e772 100644
--- a/tests/unit/test_secrets.py
+++ b/tests/unit/test_secrets.py
@@ -49,6 +49,12 @@ def test_omit_none_secret_from_line(self):
line = 'text'
self.assertEqual(line, omit_secret_value_from_line(secret=None, line_text=line))
+ def test_omit_non_string_secret_from_line(self):
+ line = 'text'
+ secret = True
+
+ self.assertEqual(line, omit_secret_value_from_line(secret, line))
+
def test_get_secrets_from_secrets(self):
s = 'access_key: "AKIAIOSFODNN7EXAMPLE"'
|
saleor__saleor-340 | Move py.test config to tox.ini
Pytest (like many tools) can read its configuration from `tox.ini`. There is no need to keep a separate `pytest.ini`.
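
Concretely, pytest reads a `[pytest]` section from `tox.ini`, so the existing settings can simply move there. A sketch of the merged file, reusing this repo's current `pytest.ini` contents alongside the tox environments:

```ini
[tox]
envlist = py27,py33,py34

[pytest]
norecursedirs =
    .*
    build
    node_modules
DJANGO_SETTINGS_MODULE = saleor.test_settings
```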
| [
{
"content": "#! /usr/bin/env python\nfrom setuptools import setup, find_packages\nfrom setuptools.command.test import test as TestCommand\nimport os\nimport sys\n\nos.environ.setdefault('DJANGO_SETTINGS_MODULE', 'saleor.settings')\n\n\nclass PyTest(TestCommand):\n user_options = [('pytest-args=', 'a', \"Arguments to pass to py.test\")]\n\n def initialize_options(self):\n TestCommand.initialize_options(self)\n self.pytest_args = []\n\n def finalize_options(self):\n TestCommand.finalize_options(self)\n self.test_args = []\n self.test_suite = True\n\n def run_tests(self):\n #import here, cause outside the eggs aren't loaded\n import pytest\n errno = pytest.main(self.pytest_args)\n sys.exit(errno)\n\n\nsetup(\n name='saleor',\n author='Mirumee Software',\n author_email='[email protected]',\n description=\"A fork'n'play e-commerce in Django\",\n license='BSD',\n version='0.1.0a0',\n url='http://getsaleor.com/',\n packages=find_packages(),\n include_package_data=True,\n install_requires=[\n 'Babel>=1.3,<1.4a0',\n 'BabelDjango>=0.2,<0.3a0',\n 'Django>=1.8',\n 'dj_database_url>=0.3.0',\n 'django-emailit>=0.2.2',\n 'django-materializecss-form==0.0.64',\n 'django-model-utils>=2.0.0,<2.1a0',\n 'django-mptt>=0.7.1',\n 'django-offsite-storage>=0.0.5',\n 'django-payments>=0.7.0,<0.8a0',\n 'django-prices>=0.4.0,<0.5a0',\n 'djangorestframework>=3.1,<3.2a0',\n 'django-selectable==0.8.0',\n 'django-versatileimagefield>=1.0.1,<1.1a0',\n 'fake-factory>=0.3.2',\n 'google-measurement-protocol>=0.1.2,<0.2a0',\n 'jsonfield>=1.0.3',\n 'Markdown>=2.4',\n 'prices>=0.5,<0.6a0',\n 'requests>=1.2.0',\n 'satchless>=1.1.2,<1.2a0',\n 'unidecode'\n ],\n extras_require={\n 'PaaS': [\n 'whitenoise==1.0.6',\n 'gunicorn==19.2.1',\n 'psycopg2==2.6']},\n cmdclass={\n 'test': PyTest},\n entry_points={\n 'console_scripts': ['saleor = saleor:manage']},\n tests_require=[\n 'mock==1.0.1',\n 'purl>=0.4.1',\n 'pytest',\n 'pytest-django'])\n",
"path": "setup.py"
}
] | [
{
"content": "#! /usr/bin/env python\nfrom setuptools import setup, find_packages\nfrom setuptools.command.test import test as TestCommand\nimport os\nimport sys\n\nos.environ.setdefault('DJANGO_SETTINGS_MODULE', 'saleor.settings')\n\n\nclass PyTest(TestCommand):\n user_options = [('pytest-args=', 'a', \"Arguments to pass to py.test\")]\n\n def initialize_options(self):\n TestCommand.initialize_options(self)\n self.pytest_args = []\n\n def finalize_options(self):\n TestCommand.finalize_options(self)\n self.test_args = []\n self.test_suite = True\n\n def run_tests(self):\n #import here, cause outside the eggs aren't loaded\n import pytest\n errno = pytest.main(self.pytest_args)\n sys.exit(errno)\n\n\nsetup(\n name='saleor',\n author='Mirumee Software',\n author_email='[email protected]',\n description=\"A fork'n'play e-commerce in Django\",\n license='BSD',\n version='0.1.0a0',\n url='http://getsaleor.com/',\n packages=find_packages(),\n include_package_data=True,\n install_requires=[\n 'Babel>=1.3,<1.4a0',\n 'BabelDjango>=0.2,<0.3a0',\n 'Django>=1.8',\n 'dj_database_url>=0.3.0',\n 'django-emailit>=0.2.2',\n 'django-materializecss-form==0.0.64',\n 'django-model-utils>=2.0.0,<2.1a0',\n 'django-mptt>=0.7.1',\n 'django-offsite-storage>=0.0.5',\n 'django-payments>=0.7.0,<0.8a0',\n 'django-prices>=0.4.0,<0.5a0',\n 'djangorestframework>=3.1,<3.2a0',\n 'django-selectable==0.8.0',\n 'django-versatileimagefield>=1.0.1,<1.1a0',\n 'fake-factory>=0.3.2',\n 'google-measurement-protocol>=0.1.2,<0.2a0',\n 'jsonfield>=1.0.3',\n 'Markdown>=2.4',\n 'prices>=0.5,<0.6a0',\n 'requests>=1.2.0',\n 'satchless>=1.1.2,<1.2a0',\n 'unidecode'\n ],\n extras_require={\n 'PaaS': [\n 'whitenoise==1.0.6',\n 'gunicorn==19.2.1',\n 'psycopg2==2.6']},\n cmdclass={\n 'test': PyTest},\n entry_points={\n 'console_scripts': ['saleor = saleor:manage']},\n tests_require=[\n 'mock==1.3.0',\n 'purl>=0.4.1',\n 'pytest',\n 'pytest-django'])\n",
"path": "setup.py"
}
] | diff --git a/.coveragerc b/.coveragerc
index 658f243ddb8..038b9790754 100644
--- a/.coveragerc
+++ b/.coveragerc
@@ -1,6 +1,8 @@
[run]
branch = 1
-omit = */test_*.py
+omit =
+ */migrations/*
+ */test_*.py
source = saleor
[report]
diff --git a/.travis.yml b/.travis.yml
index dc05305a059..c8f84a47a18 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -1,13 +1,20 @@
language: python
sudo: false
-python:
- - 2.7
- - 3.3
- - 3.4
install:
- - python setup.py install
- - pip install coverage
- - pip install codecov
-script: coverage run setup.py test
+ - pip install tox codecov
+script:
+ - tox
+env:
+ - TOXENV=py27-django18
+ - TOXENV=py27-django_master
+ - TOXENV=py33-django18
+ - TOXENV=py33-django_master
+ - TOXENV=py34-django18
+ - TOXENV=py34-django_master
+matrix:
+ allow_failures:
+ - env: TOXENV=py27-django_master
+ - env: TOXENV=py33-django_master
+ - env: TOXENV=py34-django_master
after_success:
- codecov
diff --git a/pytest.ini b/pytest.ini
deleted file mode 100644
index 03393bdaf91..00000000000
--- a/pytest.ini
+++ /dev/null
@@ -1,6 +0,0 @@
-[pytest]
-norecursedirs =
- .*
- build
- node_modules
-DJANGO_SETTINGS_MODULE = saleor.test_settings
diff --git a/saleor/registration/test_registration.py b/saleor/registration/test_registration.py
index ade33860e8c..dd4e78c5a6a 100644
--- a/saleor/registration/test_registration.py
+++ b/saleor/registration/test_registration.py
@@ -141,7 +141,15 @@ def test_token_is_obtained_on_construction(self):
"""OAuth2 client asks for access token if interim code is available"""
self.access_token_response.status_code = sentinel.ok
Client(local_host='http://localhost', code=sentinel.code)
- self.requests_mock.post.assert_called_once()
+ self.requests_mock.post.assert_called_once_with(
+ sentinel.token_uri,
+ data={'grant_type': 'authorization_code',
+ 'client_id': sentinel.client_id,
+ 'client_secret': sentinel.client_secret,
+ 'code': sentinel.code,
+ 'redirect_uri': sentinel.redirect_uri,
+ 'scope': sentinel.scope},
+ auth=None)
def test_token_success(self):
"""OAuth2 client properly obtains access token"""
diff --git a/setup.py b/setup.py
index 34d2d7d3ff2..27cbfeaf758 100755
--- a/setup.py
+++ b/setup.py
@@ -70,7 +70,7 @@ def run_tests(self):
entry_points={
'console_scripts': ['saleor = saleor:manage']},
tests_require=[
- 'mock==1.0.1',
+ 'mock==1.3.0',
'purl>=0.4.1',
'pytest',
'pytest-django'])
diff --git a/tox.ini b/tox.ini
index e52640ac7a7..38947fcc0cc 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,10 +1,20 @@
[tox]
-envlist = py27,py33,py34
+envlist = py{27,33,34}-django{18,_master}
[testenv]
-deps=
- mock
- purl
+deps =
+ django18: django>=1.8,<1.9
+ django_master: https://github.com/django/django/archive/master.tar.gz
+ mock==1.3.0
+ purl>=0.4.1
pytest
pytest-django
-commands=py.test
+ coverage
+commands=coverage run setup.py test
+
+[pytest]
+norecursedirs =
+ .*
+ build
+ node_modules
+DJANGO_SETTINGS_MODULE = saleor.test_settings
|
privacyidea__privacyidea-1247 | Audit log does not take administrative realms into account
The policy ``auditlog`` in scope admin does not honor the administrative realms.
https://github.com/privacyidea/privacyidea/blob/4b8832dfa99d54d8c790cc3b682f08d9c23388fd/privacyidea/api/lib/prepolicy.py#L1313
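
Illustratively, "honoring the administrative realms" means the policy match for the ``auditlog`` action must also compare the realm of the logged-in administrator, the way other admin-scope checks do. A self-contained toy sketch of that matching rule (the names below are hypothetical, not privacyIDEA's API, and this is not the exact upstream patch):

```python
from dataclasses import dataclass
from typing import List, Optional

@dataclass
class Policy:
    action: str
    adminrealm: Optional[str]  # None means the policy binds no admin realm

def matching_policies(policies: List[Policy], action: str,
                      adminrealm: str) -> List[Policy]:
    # A policy applies if it names the action and either binds no admin
    # realm or binds the realm of the logged-in administrator.
    return [p for p in policies
            if p.action == action and p.adminrealm in (None, adminrealm)]

pols = [Policy("auditlog", "superadmins"), Policy("auditlog", None)]
print(matching_policies(pols, "auditlog", "helpdesk"))     # only the unbound policy
print(matching_policies(pols, "auditlog", "superadmins"))  # both policies
```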
| [
{
"content": "# -*- coding: utf-8 -*-\n#\n# 2017-04-22 Cornelius Kölbel <[email protected]>\n# Add wrapper for U2F token\n# 2017-01-18 Cornelius Kölbel <[email protected]>\n# Add token specific PIN policies based on\n# Quynh's pull request.\n# 2016-11-29 Cornelius Kölbel <[email protected]>\n# Add timelimit for audit entries\n# 2016-08-30 Cornelius Kölbel <[email protected]>\n# Add decorator to save the client type to the database\n# 2016-07-17 Cornelius Kölbel <[email protected]>\n# Add realmadmin decorator\n# 2016-05-18 Cornelius Kölbel <[email protected]>\n# Add resolver to check_base_action\n# 2016-04-29 Cornelius Kölbel <[email protected]>\n# Add init_token_defaults to set default parameters\n# during token init.\n# 2016-04-08 Cornelius Kölbel <[email protected]>\n# Avoid \"None\" as redundant 2nd argument\n# 2015-12-28 Cornelius Kölbel <[email protected]>\n# Add ACTION.REQUIREDEMAIL\n# 2015-12-12 Cornelius Kölbel <[email protected]>\n# Change eval to importlib\n# 2015-11-04 Cornelius Kölbel <[email protected]>\n# Add check for REMOTE_USER\n# 2015-04-13 Cornelius Kölbel <[email protected]>\n# Add hook for external decorator for init and assign\n# 2015-02-06 Cornelius Kölbel <[email protected]>\n# Create this module for enabling decorators for API calls\n#\n# License: AGPLv3\n# contact: http://www.privacyidea.org\n#\n# This code is free software; you can redistribute it and/or\n# modify it under the terms of the GNU AFFERO GENERAL PUBLIC LICENSE\n# License as published by the Free Software Foundation; either\n# version 3 of the License, or any later version.\n#\n# This code is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU AFFERO GENERAL PUBLIC LICENSE for more details.\n#\n# You should have received a copy of the GNU Affero General Public\n# License along with this program. If not, see <http://www.gnu.org/licenses/>.\n#\n\"\"\"\nThese are the policy decorators as PRE conditions for the API calls.\nI.e. 
these conditions are executed before the wrapped API call.\nThis module uses the policy base functions from\nprivacyidea.lib.policy but also components from flask like g.\n\nWrapping the functions in a decorator class enables easy modular testing.\n\nThe functions of this module are tested in tests/test_api_lib_policy.py\n\"\"\"\nimport logging\nlog = logging.getLogger(__name__)\nfrom privacyidea.lib.error import PolicyError, RegistrationError\nfrom flask import g, current_app\nfrom privacyidea.lib.policy import SCOPE, ACTION, PolicyClass\nfrom privacyidea.lib.user import (get_user_from_param, get_default_realm,\n split_user)\nfrom privacyidea.lib.token import (get_tokens, get_realms_of_token)\nfrom privacyidea.lib.utils import (generate_password, get_client_ip,\n parse_timedelta, is_true)\nfrom privacyidea.lib.auth import ROLE\nfrom privacyidea.api.lib.utils import getParam\nfrom privacyidea.lib.clientapplication import save_clientapplication\nfrom privacyidea.lib.config import (get_token_class, get_from_config, SYSCONF)\nimport functools\nimport jwt\nimport re\nimport importlib\n# Token specific imports!\nfrom privacyidea.lib.tokens.u2ftoken import (U2FACTION, parse_registration_data)\nfrom privacyidea.lib.tokens.u2f import x509name_to_string\n\noptional = True\nrequired = False\n\n\nclass prepolicy(object):\n \"\"\"\n This is the decorator wrapper to call a specific function before an API\n call.\n The prepolicy decorator is to be used in the API calls.\n A prepolicy decorator then will modify the request data or raise an\n exception\n \"\"\"\n def __init__(self, function, request, action=None):\n \"\"\"\n :param function: This is the policy function the is to be called\n :type function: function\n :param request: The original request object, that needs to be passed\n :type request: Request Object\n \"\"\"\n self.action = action\n self.request = request\n self.function = function\n\n def __call__(self, wrapped_function):\n \"\"\"\n This decorates the given function. 
The prepolicy decorator is ment\n for API functions on the API level.\n\n If some error occur the a PolicyException is raised.\n\n The decorator function can modify the request data.\n\n :param wrapped_function: The function, that is decorated.\n :type wrapped_function: API function\n :return: None\n \"\"\"\n @functools.wraps(wrapped_function)\n def policy_wrapper(*args, **kwds):\n self.function(request=self.request,\n action=self.action)\n return wrapped_function(*args, **kwds)\n\n return policy_wrapper\n\n\ndef init_random_pin(request=None, action=None):\n \"\"\"\n This policy function is to be used as a decorator in the API init function.\n If the policy is set accordingly it adds a random PIN to the\n request.all_data like.\n\n It uses the policy SCOPE.ENROLL, ACTION.OTPPINRANDOM to set a random OTP\n PIN during Token enrollment\n \"\"\"\n params = request.all_data\n policy_object = g.policy_object\n user_object = get_user_from_param(params)\n # get the length of the random PIN from the policies\n pin_pols = policy_object.get_action_values(action=ACTION.OTPPINRANDOM,\n scope=SCOPE.ENROLL,\n user=user_object.login,\n realm=user_object.realm,\n client=g.client_ip,\n unique=True)\n\n if len(pin_pols) == 1:\n log.debug(\"Creating random OTP PIN with length {0!s}\".format(pin_pols[0]))\n request.all_data[\"pin\"] = generate_password(size=int(pin_pols[0]))\n\n # handle the PIN\n handle_pols = policy_object.get_action_values(\n action=ACTION.PINHANDLING, scope=SCOPE.ENROLL,\n user=user_object.login, realm=user_object.realm,\n client=g.client_ip)\n # We can have more than one pin handler policy. So we can process the\n # PIN in several ways!\n for handle_pol in handle_pols:\n log.debug(\"Handle the random PIN with the class {0!s}\".format(handle_pol))\n packageName = \".\".join(handle_pol.split(\".\")[:-1])\n className = handle_pol.split(\".\")[-1:][0]\n mod = __import__(packageName, globals(), locals(), [className])\n pin_handler_class = getattr(mod, className)\n pin_handler = pin_handler_class()\n # Send the PIN\n pin_handler.send(request.all_data[\"pin\"],\n request.all_data.get(\"serial\", \"N/A\"),\n user_object,\n tokentype=request.all_data.get(\"type\", \"hotp\"),\n logged_in_user=g.logged_in_user)\n\n return True\n\n\ndef realmadmin(request=None, action=None):\n \"\"\"\n This decorator adds the first REALM to the parameters if the\n administrator, calling this API is a realm admin.\n This way, if the admin calls e.g. GET /user without realm parameter,\n he will not see all users, but only users in one of his realms.\n\n TODO: If a realm admin is allowed to see more than one realm,\n this is not handled at the moment. 
We need to change the underlying\n library functions!\n\n :param request: The HTTP reqeust\n :param action: The action like ACTION.USERLIST\n \"\"\"\n # This decorator is only valid for admins\n if g.logged_in_user.get(\"role\") == ROLE.ADMIN:\n params = request.all_data\n if not \"realm\" in params:\n # add the realm to params\n policy_object = g.policy_object\n po = policy_object.get_policies(\n action=action, scope=SCOPE.ADMIN,\n user=g.logged_in_user.get(\"username\"),\n adminrealm=g.logged_in_user.get(\"realm\"), client=g.client_ip,\n active=True)\n # TODO: fix this: there could be a list of policies with a list\n # of realms!\n if po and po[0].get(\"realm\"):\n request.all_data[\"realm\"] = po[0].get(\"realm\")[0]\n\n return True\n\n\ndef check_otp_pin(request=None, action=None):\n \"\"\"\n This policy function checks if the OTP PIN that is about to be set\n follows the OTP PIN policies ACTION.OTPPINMAXLEN, ACTION.OTPPINMINLEN and\n ACTION.OTPPINCONTENTS and token-type-specific PIN policy actions in the\n SCOPE.USER or SCOPE.ADMIN. It is used to decorate the API functions.\n\n The pin is investigated in the params as \"otppin\" or \"pin\"\n\n In case the given OTP PIN does not match the requirements an exception is\n raised.\n \"\"\"\n params = request.all_data\n realm = params.get(\"realm\")\n pin = params.get(\"otppin\", \"\") or params.get(\"pin\", \"\")\n serial = params.get(\"serial\")\n tokentype = params.get(\"type\")\n if not serial and action == ACTION.SETPIN:\n path_elems = request.path.split(\"/\")\n serial = path_elems[-1]\n # Also set it for later use\n request.all_data[\"serial\"] = serial\n if serial:\n # if this is a token, that does not use a pin, we ignore this check\n # And immediately return true\n tokensobject_list = get_tokens(serial=serial)\n if len(tokensobject_list) == 1:\n if tokensobject_list[0].using_pin is False:\n return True\n tokentype = tokensobject_list[0].token.tokentype\n # the default tokentype is still HOTP\n tokentype = tokentype or \"hotp\"\n policy_object = g.policy_object\n role = g.logged_in_user.get(\"role\")\n username = g.logged_in_user.get(\"username\")\n if role == ROLE.ADMIN:\n scope = SCOPE.ADMIN\n admin_realm = g.logged_in_user.get(\"realm\")\n realm = params.get(\"realm\", \"\")\n else:\n scope = SCOPE.USER\n realm = g.logged_in_user.get(\"realm\")\n admin_realm = None\n # get the policies for minimum length, maximum length and PIN contents\n # first try to get a token specific policy - otherwise fall back to\n # default policy\n pol_minlen = policy_object.get_action_values(\n action=\"{0!s}_{1!s}\".format(tokentype, ACTION.OTPPINMINLEN),\n scope=scope, user=username, realm=realm, adminrealm=admin_realm,\n client=g.client_ip, unique=True) or \\\n policy_object.get_action_values(\n action=ACTION.OTPPINMINLEN, scope=scope, user=username,\n realm=realm, adminrealm=admin_realm, client=g.client_ip,\n unique=True)\n\n pol_maxlen = policy_object.get_action_values(\n action=\"{0!s}_{1!s}\".format(tokentype, ACTION.OTPPINMAXLEN),\n scope=scope, user=username, realm=realm, adminrealm=admin_realm,\n client=g.client_ip, unique=True) or \\\n policy_object.get_action_values(\n action=ACTION.OTPPINMAXLEN, scope=scope, user=username,\n realm=realm, adminrealm=admin_realm, client=g.client_ip,\n unique=True)\n\n pol_contents = policy_object.get_action_values(\n action=\"{0!s}_{1!s}\".format(tokentype, ACTION.OTPPINCONTENTS),\n scope=scope, user=username, realm=realm, adminrealm=admin_realm,\n client=g.client_ip, unique=True) or \\\n 
policy_object.get_action_values(\n action=ACTION.OTPPINCONTENTS, scope=scope,\n user=username, realm=realm, adminrealm=admin_realm,\n client=g.client_ip, unique=True)\n\n if len(pol_minlen) == 1 and len(pin) < int(pol_minlen[0]):\n # check the minimum length requirement\n raise PolicyError(\"The minimum OTP PIN length is {0!s}\".format(\n pol_minlen[0]))\n\n if len(pol_maxlen) == 1 and len(pin) > int(pol_maxlen[0]):\n # check the maximum length requirement\n raise PolicyError(\"The maximum OTP PIN length is {0!s}\".format(\n pol_maxlen[0]))\n\n if len(pol_contents) == 1:\n # check the contents requirement\n chars = \"[a-zA-Z]\" # c\n digits = \"[0-9]\" # n\n special = \"[.:,;_<>+*!/()=?$§%&#~\\^-]\" # s\n no_others = False\n grouping = False\n\n if pol_contents[0] == \"-\":\n no_others = True\n pol_contents = pol_contents[1:]\n elif pol_contents[0] == \"+\":\n grouping = True\n pol_contents = pol_contents[1:]\n # TODO implement grouping and substraction\n if \"c\" in pol_contents[0] and not re.search(chars, pin):\n raise PolicyError(\"Missing character in PIN: {0!s}\".format(chars))\n if \"n\" in pol_contents[0] and not re.search(digits, pin):\n raise PolicyError(\"Missing character in PIN: {0!s}\".format(digits))\n if \"s\" in pol_contents[0] and not re.search(special, pin):\n raise PolicyError(\"Missing character in PIN: {0!s}\".format(special))\n\n return True\n\n\ndef papertoken_count(request=None, action=None):\n \"\"\"\n This is a token specific wrapper for paper token for the endpoint\n /token/init.\n According to the policy scope=SCOPE.ENROLL,\n action=PAPERACTION.PAPER_COUNT it sets the parameter papertoken_count to\n enroll a paper token with such many OTP values.\n\n :param request:\n :param action:\n :return:\n \"\"\"\n from privacyidea.lib.tokens.papertoken import PAPERACTION\n user_object = request.User\n policy_object = g.policy_object\n pols = policy_object.get_action_values(\n action=PAPERACTION.PAPERTOKEN_COUNT,\n scope=SCOPE.ENROLL,\n user=user_object.login,\n resolver=user_object.resolver,\n realm=user_object.realm,\n client=g.client_ip,\n unique=True)\n\n if pols:\n papertoken_count = pols[0]\n request.all_data[\"papertoken_count\"] = papertoken_count\n\n return True\n\n\ndef tantoken_count(request=None, action=None):\n \"\"\"\n This is a token specific wrapper for tan token for the endpoint\n /token/init.\n According to the policy scope=SCOPE.ENROLL,\n action=TANACTION.TANTOKEN_COUNT it sets the parameter tantoken_count to\n enroll a tan token with such many OTP values.\n\n :param request:\n :param action:\n :return:\n \"\"\"\n from privacyidea.lib.tokens.tantoken import TANACTION\n user_object = request.User\n policy_object = g.policy_object\n pols = policy_object.get_action_values(\n action=TANACTION.TANTOKEN_COUNT,\n scope=SCOPE.ENROLL,\n user=user_object.login,\n resolver=user_object.resolver,\n realm=user_object.realm,\n client=g.client_ip,\n unique=True)\n\n if pols:\n tantoken_count = pols[0]\n request.all_data[\"tantoken_count\"] = tantoken_count\n\n return True\n\n\ndef encrypt_pin(request=None, action=None):\n \"\"\"\n This policy function is to be used as a decorator for several API functions.\n E.g. 
token/assign, token/setpin, token/init\n If the policy is set to define the PIN to be encrypted,\n the request.all_data is modified like this:\n encryptpin = True\n\n It uses the policy SCOPE.ENROLL, ACTION.ENCRYPTPIN\n \"\"\"\n params = request.all_data\n policy_object = g.policy_object\n user_object = get_user_from_param(params)\n # get the length of the random PIN from the policies\n pin_pols = policy_object.get_policies(action=ACTION.ENCRYPTPIN,\n scope=SCOPE.ENROLL,\n user=user_object.login,\n realm=user_object.realm,\n client=g.client_ip,\n active=True)\n\n if pin_pols:\n request.all_data[\"encryptpin\"] = \"True\"\n else:\n if \"encryptpin\" in request.all_data:\n del request.all_data[\"encryptpin\"]\n\n return True\n\n\ndef enroll_pin(request=None, action=None):\n \"\"\"\n This policy function is used as decorator for init token.\n It checks, if the user or the admin is allowed to set a token PIN during\n enrollment. If not, it deleted the PIN from the request.\n \"\"\"\n policy_object = g.policy_object\n role = g.logged_in_user.get(\"role\")\n if role == ROLE.USER:\n scope = SCOPE.USER\n username = g.logged_in_user.get(\"username\")\n realm = g.logged_in_user.get(\"realm\")\n adminrealm = None\n else:\n scope = SCOPE.ADMIN\n username = g.logged_in_user.get(\"username\")\n realm = getParam(request.all_data, \"realm\")\n adminrealm = g.logged_in_user.get(\"realm\")\n pin_pols = policy_object.get_policies(action=ACTION.ENROLLPIN,\n scope=scope,\n user=username,\n realm=realm,\n adminrealm=adminrealm,\n client=g.client_ip,\n active=True)\n action_at_all = policy_object.get_policies(scope=scope,\n active=True,\n all_times=True)\n\n if action_at_all and not pin_pols:\n # Not allowed to set a PIN during enrollment!\n if \"pin\" in request.all_data:\n del request.all_data[\"pin\"]\n return True\n\n\ndef init_token_defaults(request=None, action=None):\n \"\"\"\n This policy function is used as a decorator for the API init function.\n Depending on policy settings it can add token specific default values\n like totp_hashlib, hotp_hashlib, totp_otplen...\n \"\"\"\n params = request.all_data\n ttype = params.get(\"type\") or \"hotp\"\n token_class = get_token_class(ttype)\n default_settings = token_class.get_default_settings(params,\n g.logged_in_user,\n g.policy_object,\n g.client_ip)\n log.debug(\"Adding default settings {0!s} for token type {1!s}\".format(\n default_settings, ttype))\n request.all_data.update(default_settings)\n return True\n\n\ndef init_tokenlabel(request=None, action=None):\n \"\"\"\n This policy function is to be used as a decorator in the API init function.\n It adds the tokenlabel definition to the params like this:\n params : { \"tokenlabel\": \"<u>@<r>\" }\n\n In addtion it adds the tokenissuer to the params like this:\n params : { \"tokenissuer\": \"privacyIDEA instance\" }\n\n It uses the policy SCOPE.ENROLL, ACTION.TOKENLABEL and ACTION.TOKENISSUER\n to set the tokenlabel and tokenissuer\n of Smartphone tokens during enrollment and this fill the details of the\n response.\n \"\"\"\n params = request.all_data\n policy_object = g.policy_object\n user_object = get_user_from_param(params)\n # get the serials from a policy definition\n label_pols = policy_object.get_action_values(action=ACTION.TOKENLABEL,\n scope=SCOPE.ENROLL,\n user=user_object.login,\n realm=user_object.realm,\n client=g.client_ip,\n unique=True,\n allow_white_space_in_action=True)\n\n if len(label_pols) == 1:\n # The policy was set, so we need to set the tokenlabel in the request.\n 
request.all_data[\"tokenlabel\"] = label_pols[0]\n\n issuer_pols = policy_object.get_action_values(action=ACTION.TOKENISSUER,\n scope=SCOPE.ENROLL,\n user=user_object.login,\n realm=user_object.realm,\n client=g.client_ip,\n unique=True,\n allow_white_space_in_action=True)\n if len(issuer_pols) == 1:\n request.all_data[\"tokenissuer\"] = issuer_pols[0]\n\n return True\n\n\ndef twostep_enrollment_activation(request=None, action=None):\n \"\"\"\n This policy function enables the two-step enrollment process according\n to the configured policies.\n It is used to decorate the ``/token/init`` endpoint.\n\n If a ``<type>_2step`` policy matches, the ``2stepinit`` parameter is handled according to the policy.\n If no policy matches, the ``2stepinit`` parameter is removed from the request data.\n \"\"\"\n policy_object = g.policy_object\n user_object = get_user_from_param(request.all_data)\n serial = getParam(request.all_data, \"serial\", optional)\n token_type = getParam(request.all_data, \"type\", optional, \"hotp\")\n token_exists = False\n if serial:\n tokensobject_list = get_tokens(serial=serial)\n if len(tokensobject_list) == 1:\n token_type = tokensobject_list[0].token.tokentype\n token_exists = True\n token_type = token_type.lower()\n role = g.logged_in_user.get(\"role\")\n # Differentiate between an admin enrolling a token for the\n # user and a user self-enrolling a token.\n if role == ROLE.ADMIN:\n scope = SCOPE.ADMIN\n adminrealm = g.logged_in_user.get(\"realm\")\n else:\n scope = SCOPE.USER\n adminrealm = None\n realm = user_object.realm\n # In any case, the policy's user attribute is matched against the\n # currently logged-in user (which may be the admin or the\n # self-enrolling user).\n user = g.logged_in_user.get(\"username\")\n # Tokentypes have separate twostep actions\n action = \"{}_2step\".format(token_type)\n twostep_enabled_pols = policy_object.get_action_values(action=action,\n scope=scope,\n unique=True,\n user=user,\n realm=realm,\n client=g.client_ip,\n adminrealm=adminrealm)\n if twostep_enabled_pols:\n enabled_setting = twostep_enabled_pols[0]\n if enabled_setting == \"allow\":\n # The user is allowed to pass 2stepinit=1\n pass\n elif enabled_setting == \"force\":\n # We force 2stepinit to be 1 (if the token does not exist yet)\n if not token_exists:\n request.all_data[\"2stepinit\"] = 1\n else:\n raise PolicyError(\"Unknown 2step policy setting: {}\".format(enabled_setting))\n else:\n # If no policy matches, the user is not allowed\n # to pass 2stepinit\n # Force two-step initialization to be None\n if \"2stepinit\" in request.all_data:\n del request.all_data[\"2stepinit\"]\n return True\n\n\ndef twostep_enrollment_parameters(request=None, action=None):\n \"\"\"\n If the ``2stepinit`` parameter is set to true, this policy function\n reads additional configuration from policies and adds it\n to ``request.all_data``, that is:\n\n * ``{type}_2step_serversize`` is written to ``2step_serversize``\n * ``{type}_2step_clientsize`` is written to ``2step_clientsize`\n * ``{type}_2step_difficulty`` is written to ``2step_difficulty``\n\n If no policy matches, the value passed by the user is kept.\n\n This policy function is used to decorate the ``/token/init`` endpoint.\n \"\"\"\n policy_object = g.policy_object\n user_object = get_user_from_param(request.all_data)\n serial = getParam(request.all_data, \"serial\", optional)\n token_type = getParam(request.all_data, \"type\", optional, \"hotp\")\n if serial:\n tokensobject_list = get_tokens(serial=serial)\n if 
len(tokensobject_list) == 1:\n token_type = tokensobject_list[0].token.tokentype\n token_type = token_type.lower()\n role = g.logged_in_user.get(\"role\")\n # Differentiate between an admin enrolling a token for the\n # user and a user self-enrolling a token.\n if role == ROLE.ADMIN:\n adminrealm = g.logged_in_user.get(\"realm\")\n else:\n adminrealm = None\n realm = user_object.realm\n # In any case, the policy's user attribute is matched against the\n # currently logged-in user (which may be the admin or the\n # self-enrolling user).\n user = g.logged_in_user.get(\"username\")\n # Tokentypes have separate twostep actions\n if is_true(getParam(request.all_data, \"2stepinit\", optional)):\n parameters = (\"2step_serversize\", \"2step_clientsize\", \"2step_difficulty\")\n for parameter in parameters:\n action = u\"{}_{}\".format(token_type, parameter)\n action_values = policy_object.get_action_values(action=action,\n scope=SCOPE.ENROLL,\n unique=True,\n user=user,\n realm=realm,\n client=g.client_ip,\n adminrealm=adminrealm)\n if action_values:\n request.all_data[parameter] = action_values[0]\n\n\ndef check_max_token_user(request=None, action=None):\n \"\"\"\n Pre Policy\n This checks the maximum token per user policy.\n Check ACTION.MAXTOKENUSER\n\n This decorator can wrap:\n /token/init (with a realm and user)\n /token/assign\n\n :param req:\n :param action:\n :return: True otherwise raises an Exception\n \"\"\"\n ERROR = \"The number of tokens for this user is limited!\"\n params = request.all_data\n user_object = get_user_from_param(params)\n serial = getParam(params, \"serial\")\n if user_object.login:\n policy_object = g.policy_object\n limit_list = policy_object.get_action_values(ACTION.MAXTOKENUSER,\n scope=SCOPE.ENROLL,\n realm=user_object.realm,\n user=user_object.login,\n client=g.client_ip)\n if limit_list:\n # we need to check how many tokens the user already has assigned!\n tokenobject_list = get_tokens(user=user_object)\n if serial and serial in [tok.token.serial for tok in tokenobject_list]:\n # If a serial is provided and this token already exists, the\n # token can be regenerated\n return True\n already_assigned_tokens = len(tokenobject_list)\n if already_assigned_tokens >= max([int(x) for x in limit_list]):\n raise PolicyError(ERROR)\n return True\n\n\ndef check_max_token_realm(request=None, action=None):\n \"\"\"\n Pre Policy\n This checks the maximum token per realm.\n Check ACTION.MAXTOKENREALM\n\n This decorator can wrap:\n /token/init (with a realm and user)\n /token/assign\n /token/tokenrealms\n\n :param req: The request that is intercepted during the API call\n :type req: Request Object\n :param action: An optional Action\n :type action: basestring\n :return: True otherwise raises an Exception\n \"\"\"\n ERROR = \"The number of tokens in this realm is limited!\"\n params = request.all_data\n user_object = get_user_from_param(params)\n if user_object:\n realm = user_object.realm\n else: # pragma: no cover\n realm = params.get(\"realm\")\n\n if realm:\n policy_object = g.policy_object\n limit_list = policy_object.get_action_values(ACTION.MAXTOKENREALM,\n scope=SCOPE.ENROLL,\n realm=realm,\n client=g.client_ip)\n if limit_list:\n # we need to check how many tokens the realm already has assigned!\n tokenobject_list = get_tokens(realm=realm)\n already_assigned_tokens = len(tokenobject_list)\n if already_assigned_tokens >= max([int(x) for x in limit_list]):\n raise PolicyError(ERROR)\n return True\n\n\ndef set_realm(request=None, action=None):\n \"\"\"\n Pre Policy\n 
This pre condition gets the current realm and verifies if the realm\n should be rewritten due to the policy definition.\n I takes the realm from the request and - if a policy matches - replaces\n this realm with the realm defined in the policy\n\n Check ACTION.SETREALM\n\n This decorator should wrap\n /validate/check\n\n :param request: The request that is intercepted during the API call\n :type request: Request Object\n :param action: An optional Action\n :type action: basestring\n :returns: Always true. Modified the parameter request\n \"\"\"\n #user_object = get_user_from_param(request.all_data)\n user_object = request.User\n # At the moment a realm parameter with no user parameter returns a user\n # object like \"@realm\". If this is changed one day, we need to also fetch\n # the realm\n if user_object:\n realm = user_object.realm\n username = user_object.login\n else: # pragma: no cover\n realm = request.all_data.get(\"realm\")\n username = None\n\n policy_object = g.policy_object\n new_realm = policy_object.get_action_values(ACTION.SETREALM,\n scope=SCOPE.AUTHZ,\n user=username,\n realm=realm,\n client=g.client_ip)\n if len(new_realm) > 1:\n raise PolicyError(\"I do not know, to which realm I should set the \"\n \"new realm. Conflicting policies exist.\")\n elif len(new_realm) == 1:\n # There is one specific realm, which we set in the request\n request.all_data[\"realm\"] = new_realm[0]\n\n return True\n\n\ndef required_email(request=None, action=None):\n \"\"\"\n This precondition checks if the \"email\" parameter matches the regular\n expression in the policy scope=register, action=requiredemail.\n See :ref:`policy_requiredemail`.\n\n Check ACTION.REQUIREDEMAIL\n\n This decorator should wrap POST /register\n\n :param request: The Request Object\n :param action: An optional Action\n :return: Modifies the request parameters or raises an Exception\n \"\"\"\n email = getParam(request.all_data, \"email\")\n email_found = False\n email_pols = g.policy_object.\\\n get_action_values(ACTION.REQUIREDEMAIL, scope=SCOPE.REGISTER,\n client=g.client_ip)\n if email and email_pols:\n for email_pol in email_pols:\n # The policy is only \"/regularexpr/\".\n search = email_pol.strip(\"/\")\n if re.findall(search, email):\n email_found = True\n if not email_found:\n raise RegistrationError(\"This email address is not allowed to \"\n \"register!\")\n\n return True\n\n\ndef auditlog_age(request=None, action=None):\n \"\"\"\n This pre condition checks for the policy auditlog_age and set the\n \"timelimit\" parameter of the audit search API.\n\n Check ACTION.AUDIT_AGE\n\n The decorator can wrap GET /audit/\n\n :param request: The request that is intercepted during the API call\n :type request: Request Object\n :param action: An optional Action\n :type action: basestring\n :returns: Always true. 
Modified the parameter request\n \"\"\"\n user_object = request.User\n policy_object = g.policy_object\n role = g.logged_in_user.get(\"role\")\n if role == ROLE.ADMIN:\n scope = SCOPE.ADMIN\n adminrealm = g.logged_in_user.get(\"realm\")\n user = g.logged_in_user.get(\"username\")\n realm = user_object.realm\n else:\n scope = SCOPE.USER\n adminrealm = None\n user = user_object.login\n realm = user_object.realm\n\n audit_age = policy_object.get_action_values(ACTION.AUDIT_AGE,\n scope=scope,\n adminrealm=adminrealm,\n realm=realm,\n user=user,\n client=g.client_ip,\n unique=True)\n timelimit = None\n timelimit_s = None\n for aa in audit_age:\n if not timelimit:\n timelimit_s = aa\n timelimit = parse_timedelta(timelimit_s)\n else:\n # We will use the longest allowed timelimit\n if parse_timedelta(aa) > timelimit:\n timelimit_s = aa\n timelimit = parse_timedelta(timelimit_s)\n\n log.debug(\"auditlog_age: {0!s}\".format(timelimit_s))\n request.all_data[\"timelimit\"] = timelimit_s\n\n return True\n\n\ndef mangle(request=None, action=None):\n \"\"\"\n This pre condition checks if either of the parameters pass, user or realm\n in a validate/check request should be rewritten based on an\n authentication policy with action \"mangle\".\n See :ref:`policy_mangle` for an example.\n\n Check ACTION.MANGLE\n\n This decorator should wrap\n /validate/check\n\n :param request: The request that is intercepted during the API call\n :type request: Request Object\n :param action: An optional Action\n :type action: basestring\n :returns: Always true. Modified the parameter request\n \"\"\"\n user_object = request.User\n\n policy_object = g.policy_object\n mangle_pols = policy_object.get_action_values(ACTION.MANGLE,\n scope=SCOPE.AUTH,\n realm=user_object.realm,\n user=user_object.login,\n client=g.client_ip)\n # We can have several mangle policies! One for user, one for realm and\n # one for pass. So we do no checking here.\n for mangle_pol_action in mangle_pols:\n # mangle_pol_action looks like this:\n # keyword/search/replace/. 
Where \"keyword\" can be \"user\", \"pass\" or\n # \"realm\".\n mangle_key, search, replace, _rest = mangle_pol_action.split(\"/\", 3)\n mangle_value = request.all_data.get(mangle_key)\n if mangle_value:\n log.debug(\"mangling authentication data: {0!s}\".format(mangle_key))\n request.all_data[mangle_key] = re.sub(search, replace,\n mangle_value)\n if mangle_key in [\"user\", \"realm\"]:\n request.User = get_user_from_param(request.all_data)\n return True\n\n\ndef check_anonymous_user(request=None, action=None):\n \"\"\"\n This decorator function takes the request and verifies the given action\n for the SCOPE USER without an authenticated user but the user from the\n parameters.\n\n This is used with password_reset\n\n :param request:\n :param action:\n :return: True otherwise raises an Exception\n \"\"\"\n ERROR = \"User actions are defined, but this action is not allowed!\"\n params = request.all_data\n policy_object = g.policy_object\n scope = SCOPE.USER\n user_obj = get_user_from_param(params)\n username = user_obj.login\n realm = user_obj.realm\n\n action = policy_object.get_policies(action=action,\n user=username,\n realm=realm,\n scope=scope,\n client=g.client_ip,\n adminrealm=None,\n active=True)\n action_at_all = policy_object.get_policies(scope=scope,\n active=True,\n all_times=True)\n if action_at_all and len(action) == 0:\n raise PolicyError(ERROR)\n return True\n\n\ndef check_base_action(request=None, action=None, anonymous=False):\n \"\"\"\n This decorator function takes the request and verifies the given action\n for the SCOPE ADMIN or USER.\n :param request:\n :param action:\n :param anonymous: If set to True, the user data is taken from the request\n parameters.\n :return: True otherwise raises an Exception\n \"\"\"\n ERROR = {\"user\": \"User actions are defined, but the action %s is not \"\n \"allowed!\" % action,\n \"admin\": \"Admin actions are defined, but the action %s is not \"\n \"allowed!\" % action}\n params = request.all_data\n policy_object = g.policy_object\n username = g.logged_in_user.get(\"username\")\n role = g.logged_in_user.get(\"role\")\n scope = SCOPE.ADMIN\n admin_realm = g.logged_in_user.get(\"realm\")\n realm = None\n resolver = None\n\n if role == ROLE.USER:\n scope = SCOPE.USER\n # Reset the admin realm\n admin_realm = None\n realm = realm or g.logged_in_user.get(\"realm\")\n\n # In certain cases we can not resolve the user by the serial!\n if action not in [ACTION.AUDIT]:\n realm = params.get(\"realm\")\n if type(realm) == list and len(realm) == 1:\n realm = realm[0]\n resolver = params.get(\"resolver\")\n # get the realm by the serial:\n if not realm and params.get(\"serial\"):\n realm = get_realms_of_token(params.get(\"serial\"),\n only_first_realm=True)\n\n # get the realm by the serial, while the serial is part of the URL like\n # DELETE /token/serial\n if not realm and request.view_args and request.view_args.get(\"serial\"):\n realm = get_realms_of_token(request.view_args.get(\"serial\"),\n only_first_realm=True)\n\n action = policy_object.get_policies(action=action,\n user=username,\n realm=realm,\n scope=scope,\n resolver=resolver,\n client=g.client_ip,\n adminrealm=admin_realm,\n active=True)\n action_at_all = policy_object.get_policies(scope=scope,\n active=True,\n all_times=True)\n if action_at_all and len(action) == 0:\n raise PolicyError(ERROR.get(role))\n return True\n\n\ndef check_token_upload(request=None, action=None):\n \"\"\"\n This decorator function takes the request and verifies the given action\n for scope ADMIN\n :param 
req:\n :param filename:\n :return:\n \"\"\"\n params = request.all_data\n policy_object = g.policy_object\n username = g.logged_in_user.get(\"username\")\n admin_realm = g.logged_in_user.get(\"realm\")\n action = policy_object.get_policies(action=ACTION.IMPORT,\n user=username,\n realm=params.get(\"realm\"),\n scope=SCOPE.ADMIN,\n client=g.client_ip,\n adminrealm=admin_realm,\n active=True)\n action_at_all = policy_object.get_policies(scope=SCOPE.ADMIN,\n active=True, all_times=True)\n if action_at_all and len(action) == 0:\n raise PolicyError(\"Admin actions are defined, but you are not allowed\"\n \" to upload token files.\")\n return True\n\n\ndef check_token_init(request=None, action=None):\n \"\"\"\n This decorator function takes the request and verifies\n if the requested tokentype is allowed to be enrolled in the SCOPE ADMIN\n or the SCOPE USER.\n :param request:\n :param action:\n :return: True or an Exception is raised\n \"\"\"\n ERROR = {\"user\": \"User actions are defined, you are not allowed to \"\n \"enroll this token type!\",\n \"admin\": \"Admin actions are defined, but you are not allowed to \"\n \"enroll this token type!\"}\n params = request.all_data\n policy_object = g.policy_object\n username = g.logged_in_user.get(\"username\")\n role = g.logged_in_user.get(\"role\")\n admin_realm = g.logged_in_user.get(\"realm\")\n scope = SCOPE.ADMIN\n if role == ROLE.USER:\n scope = SCOPE.USER\n admin_realm = None\n tokentype = params.get(\"type\", \"HOTP\")\n action = \"enroll{0!s}\".format(tokentype.upper())\n action = policy_object.get_policies(action=action,\n user=username,\n realm=params.get(\"realm\"),\n scope=scope,\n client=g.client_ip,\n adminrealm=admin_realm,\n active=True)\n action_at_all = policy_object.get_policies(scope=scope, active=True,\n all_times=True)\n if action_at_all and len(action) == 0:\n raise PolicyError(ERROR.get(role))\n return True\n\n\ndef check_external(request=None, action=\"init\"):\n \"\"\"\n This decorator is a hook to an external check function, that is called\n before the token/init or token/assign API.\n\n :param request: The REST request\n :type request: flask Request object\n :param action: This is either \"init\" or \"assign\"\n :type action: basestring\n :return: either True or an Exception is raised\n \"\"\"\n function_name = None\n module = None\n try:\n module_func = current_app.config.get(\"PI_INIT_CHECK_HOOK\")\n if module_func:\n module_name = \".\".join(module_func.split(\".\")[:-1])\n module = importlib.import_module(module_name)\n function_name = module_func.split(\".\")[-1]\n except Exception as exx:\n log.error(\"Error importing external check function: {0!s}\".format(exx))\n\n # Import of function was successful\n if function_name:\n external_func = getattr(module, function_name)\n external_func(request, action)\n return True\n\n\ndef api_key_required(request=None, action=None):\n \"\"\"\n This is a decorator for check_user_pass and check_serial_pass.\n It checks, if a policy scope=auth, action=apikeyrequired is set.\n If so, the validate request will only performed, if a JWT token is passed\n with role=validate.\n \"\"\"\n ERROR = \"The policy requires an API key to authenticate, \" \\\n \"but no key was passed.\"\n params = request.all_data\n policy_object = g.policy_object\n #user_object = get_user_from_param(params)\n user_object = request.User\n\n # Get the policies\n action = policy_object.get_policies(action=ACTION.APIKEY,\n user=user_object.login,\n realm=user_object.realm,\n scope=SCOPE.AUTHZ,\n client=g.client_ip,\n 
active=True)\n # Do we have a policy?\n if action:\n # check if we were passed a correct JWT\n # Get the Authorization token from the header\n auth_token = request.headers.get('PI-Authorization')\n if not auth_token:\n auth_token = request.headers.get('Authorization')\n try:\n r = jwt.decode(auth_token, current_app.secret_key)\n g.logged_in_user = {\"username\": r.get(\"username\", \"\"),\n \"realm\": r.get(\"realm\", \"\"),\n \"role\": r.get(\"role\", \"\")}\n except (AttributeError, jwt.DecodeError):\n # PyJWT 1.3.0 raises AttributeError, PyJWT 1.6.4 raises DecodeError.\n raise PolicyError(\"No valid API key was passed.\")\n\n role = g.logged_in_user.get(\"role\")\n if role != ROLE.VALIDATE:\n raise PolicyError(\"A correct JWT was passed, but it was no API \"\n \"key.\")\n\n # If everything went fine, we call the original function\n return True\n\n\ndef mock_success(req, action):\n \"\"\"\n This is a mock function as an example for check_external. This function\n returns success and the API call will go on unmodified.\n \"\"\"\n return True\n\n\ndef mock_fail(req, action):\n \"\"\"\n This is a mock function as an example for check_external. This function\n creates a problem situation and the token/init or token/assign will show\n this exception accordingly.\n \"\"\"\n raise Exception(\"This is an Exception in an external check function\")\n\n\ndef is_remote_user_allowed(req):\n \"\"\"\n Checks if the REMOTE_USER server variable is allowed to be used.\n\n .. note:: This is not used as a decorator!\n\n :param req: The flask request, containing the remote user and the client IP\n :return:\n \"\"\"\n res = False\n if req.remote_user:\n loginname, realm = split_user(req.remote_user)\n realm = realm or get_default_realm()\n\n # Check if the remote user is allowed\n if \"client_ip\" not in g:\n g.client_ip = get_client_ip(req,\n get_from_config(SYSCONF.OVERRIDECLIENT))\n if \"policy_object\" not in g:\n g.policy_object = PolicyClass()\n ruser_active = g.policy_object.get_action_values(ACTION.REMOTE_USER,\n scope=SCOPE.WEBUI,\n user=loginname,\n realm=realm,\n client=g.client_ip)\n\n res = ruser_active\n\n return res\n\n\ndef save_client_application_type(request, action):\n \"\"\"\n This decorator is used to write the client IP and the HTTP user agent (\n clienttype) to the database.\n\n In fact this is not a **policy** decorator, as it checks no policy. In\n fact, we could however one day\n define this as a policy, too.\n :param req:\n :return:\n \"\"\"\n # retrieve the IP. 
This will also be the mapped IP!\n client_ip = g.client_ip or \"0.0.0.0\"\n # ...and the user agent.\n ua = request.user_agent\n save_clientapplication(client_ip, \"{0!s}\".format(ua) or \"unknown\")\n return True\n\n\ndef u2ftoken_verify_cert(request, action):\n \"\"\"\n This is a token specific wrapper for u2f token for the endpoint\n /token/init\n According to the policy scope=SCOPE.ENROLL,\n action=U2FACTION.NO_VERIFY_CERT it can add a parameter to the\n enrollment parameters to not verify the attestation certificate.\n The default is to verify the cert.\n :param request:\n :param action:\n :return:\n \"\"\"\n # Get the registration data of the 2nd step of enrolling a U2F device\n ttype = request.all_data.get(\"type\")\n if ttype and ttype.lower() == \"u2f\":\n policy_object = g.policy_object\n # Add the default to verify the cert.\n request.all_data[\"u2f.verify_cert\"] = True\n user_object = request.User\n\n if user_object:\n token_user = user_object.login\n token_realm = user_object.realm\n token_resolver = user_object.resolver\n else:\n token_realm = token_resolver = token_user = None\n\n do_not_verify_the_cert = policy_object.get_policies(\n action=U2FACTION.NO_VERIFY_CERT,\n scope=SCOPE.ENROLL,\n realm=token_realm,\n user=token_user,\n resolver=token_resolver,\n active=True,\n client=g.client_ip)\n if do_not_verify_the_cert:\n request.all_data[\"u2f.verify_cert\"] = False\n\n log.debug(\"Should we not verify the attestation certificate? \"\n \"Policies: {0!s}\".format(do_not_verify_the_cert))\n return True\n\n\ndef u2ftoken_allowed(request, action):\n \"\"\"\n This is a token specific wrapper for u2f token for the endpoint\n /token/init.\n According to the policy scope=SCOPE.ENROLL,\n action=U2FACTION.REQ it checks, if the assertion certificate is an\n allowed U2F token type.\n\n If the token, which is enrolled contains a non allowed attestation \n certificate, we bail out.\n\n :param request: \n :param action: \n :return: \n \"\"\"\n policy_object = g.policy_object\n # Get the registration data of the 2nd step of enrolling a U2F device\n reg_data = request.all_data.get(\"regdata\")\n if reg_data:\n # We have a registered u2f device!\n serial = request.all_data.get(\"serial\")\n user_object = request.User\n\n # We just check, if the issuer is allowed, not if the certificate\n # is still valid! 
(verify_cert=False)\n attestation_cert, user_pub_key, key_handle, \\\n signature, description = parse_registration_data(reg_data,\n verify_cert=False)\n\n cert_info = {\n \"attestation_issuer\":\n x509name_to_string(attestation_cert.get_issuer()),\n \"attestation_serial\": \"{!s}\".format(\n attestation_cert.get_serial_number()),\n \"attestation_subject\": x509name_to_string(\n attestation_cert.get_subject())}\n\n if user_object:\n token_user = user_object.login\n token_realm = user_object.realm\n token_resolver = user_object.resolver\n else:\n token_realm = token_resolver = token_user = None\n\n allowed_certs_pols = policy_object.get_action_values(\n U2FACTION.REQ,\n scope=SCOPE.ENROLL,\n realm=token_realm,\n user=token_user,\n resolver=token_resolver,\n client=g.client_ip)\n for allowed_cert in allowed_certs_pols:\n tag, matching, _rest = allowed_cert.split(\"/\", 3)\n tag_value = cert_info.get(\"attestation_{0!s}\".format(tag))\n # if we do not get a match, we bail out\n m = re.search(matching, tag_value)\n if not m:\n log.warning(\"The U2F device {0!s} is not \"\n \"allowed to be registered due to policy \"\n \"restriction\".format(\n serial))\n raise PolicyError(\"The U2F device is not allowed \"\n \"to be registered due to policy \"\n \"restriction.\")\n # TODO: Maybe we should delete the token, as it is a not\n # usable U2F token, now.\n\n return True\n\n\ndef allowed_audit_realm(request=None, action=None):\n \"\"\"\n This decorator function takes the request and adds additional parameters \n to the request according to the policy\n for the SCOPE.ADMIN or ACTION.AUDIT\n :param request:\n :param action:\n :return: True\n \"\"\"\n admin_user = g.logged_in_user\n policy_object = g.policy_object\n pols = policy_object.get_policies(\n action=ACTION.AUDIT,\n scope=SCOPE.ADMIN,\n user=admin_user.get(\"username\"),\n client=g.client_ip,\n active=True)\n\n if pols:\n # get all values in realm:\n allowed_audit_realms = []\n for pol in pols:\n if pol.get(\"realm\"):\n allowed_audit_realms += pol.get(\"realm\")\n request.all_data[\"allowed_audit_realm\"] = list(set(\n allowed_audit_realms))\n\n return True\n\n\n",
"path": "privacyidea/api/lib/prepolicy.py"
}
] | [
{
"content": "# -*- coding: utf-8 -*-\n#\n# 2017-04-22 Cornelius Kölbel <[email protected]>\n# Add wrapper for U2F token\n# 2017-01-18 Cornelius Kölbel <[email protected]>\n# Add token specific PIN policies based on\n# Quynh's pull request.\n# 2016-11-29 Cornelius Kölbel <[email protected]>\n# Add timelimit for audit entries\n# 2016-08-30 Cornelius Kölbel <[email protected]>\n# Add decorator to save the client type to the database\n# 2016-07-17 Cornelius Kölbel <[email protected]>\n# Add realmadmin decorator\n# 2016-05-18 Cornelius Kölbel <[email protected]>\n# Add resolver to check_base_action\n# 2016-04-29 Cornelius Kölbel <[email protected]>\n# Add init_token_defaults to set default parameters\n# during token init.\n# 2016-04-08 Cornelius Kölbel <[email protected]>\n# Avoid \"None\" as redundant 2nd argument\n# 2015-12-28 Cornelius Kölbel <[email protected]>\n# Add ACTION.REQUIREDEMAIL\n# 2015-12-12 Cornelius Kölbel <[email protected]>\n# Change eval to importlib\n# 2015-11-04 Cornelius Kölbel <[email protected]>\n# Add check for REMOTE_USER\n# 2015-04-13 Cornelius Kölbel <[email protected]>\n# Add hook for external decorator for init and assign\n# 2015-02-06 Cornelius Kölbel <[email protected]>\n# Create this module for enabling decorators for API calls\n#\n# License: AGPLv3\n# contact: http://www.privacyidea.org\n#\n# This code is free software; you can redistribute it and/or\n# modify it under the terms of the GNU AFFERO GENERAL PUBLIC LICENSE\n# License as published by the Free Software Foundation; either\n# version 3 of the License, or any later version.\n#\n# This code is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU AFFERO GENERAL PUBLIC LICENSE for more details.\n#\n# You should have received a copy of the GNU Affero General Public\n# License along with this program. If not, see <http://www.gnu.org/licenses/>.\n#\n\"\"\"\nThese are the policy decorators as PRE conditions for the API calls.\nI.e. 
these conditions are executed before the wrapped API call.\nThis module uses the policy base functions from\nprivacyidea.lib.policy but also components from flask like g.\n\nWrapping the functions in a decorator class enables easy modular testing.\n\nThe functions of this module are tested in tests/test_api_lib_policy.py\n\"\"\"\nimport logging\nlog = logging.getLogger(__name__)\nfrom privacyidea.lib.error import PolicyError, RegistrationError\nfrom flask import g, current_app\nfrom privacyidea.lib.policy import SCOPE, ACTION, PolicyClass\nfrom privacyidea.lib.user import (get_user_from_param, get_default_realm,\n split_user)\nfrom privacyidea.lib.token import (get_tokens, get_realms_of_token)\nfrom privacyidea.lib.utils import (generate_password, get_client_ip,\n parse_timedelta, is_true)\nfrom privacyidea.lib.auth import ROLE\nfrom privacyidea.api.lib.utils import getParam\nfrom privacyidea.lib.clientapplication import save_clientapplication\nfrom privacyidea.lib.config import (get_token_class, get_from_config, SYSCONF)\nimport functools\nimport jwt\nimport re\nimport importlib\n# Token specific imports!\nfrom privacyidea.lib.tokens.u2ftoken import (U2FACTION, parse_registration_data)\nfrom privacyidea.lib.tokens.u2f import x509name_to_string\n\noptional = True\nrequired = False\n\n\nclass prepolicy(object):\n \"\"\"\n This is the decorator wrapper to call a specific function before an API\n call.\n The prepolicy decorator is to be used in the API calls.\n A prepolicy decorator then will modify the request data or raise an\n exception\n \"\"\"\n def __init__(self, function, request, action=None):\n \"\"\"\n :param function: This is the policy function the is to be called\n :type function: function\n :param request: The original request object, that needs to be passed\n :type request: Request Object\n \"\"\"\n self.action = action\n self.request = request\n self.function = function\n\n def __call__(self, wrapped_function):\n \"\"\"\n This decorates the given function. 
The prepolicy decorator is ment\n for API functions on the API level.\n\n If some error occur the a PolicyException is raised.\n\n The decorator function can modify the request data.\n\n :param wrapped_function: The function, that is decorated.\n :type wrapped_function: API function\n :return: None\n \"\"\"\n @functools.wraps(wrapped_function)\n def policy_wrapper(*args, **kwds):\n self.function(request=self.request,\n action=self.action)\n return wrapped_function(*args, **kwds)\n\n return policy_wrapper\n\n\ndef init_random_pin(request=None, action=None):\n \"\"\"\n This policy function is to be used as a decorator in the API init function.\n If the policy is set accordingly it adds a random PIN to the\n request.all_data like.\n\n It uses the policy SCOPE.ENROLL, ACTION.OTPPINRANDOM to set a random OTP\n PIN during Token enrollment\n \"\"\"\n params = request.all_data\n policy_object = g.policy_object\n user_object = get_user_from_param(params)\n # get the length of the random PIN from the policies\n pin_pols = policy_object.get_action_values(action=ACTION.OTPPINRANDOM,\n scope=SCOPE.ENROLL,\n user=user_object.login,\n realm=user_object.realm,\n client=g.client_ip,\n unique=True)\n\n if len(pin_pols) == 1:\n log.debug(\"Creating random OTP PIN with length {0!s}\".format(pin_pols[0]))\n request.all_data[\"pin\"] = generate_password(size=int(pin_pols[0]))\n\n # handle the PIN\n handle_pols = policy_object.get_action_values(\n action=ACTION.PINHANDLING, scope=SCOPE.ENROLL,\n user=user_object.login, realm=user_object.realm,\n client=g.client_ip)\n # We can have more than one pin handler policy. So we can process the\n # PIN in several ways!\n for handle_pol in handle_pols:\n log.debug(\"Handle the random PIN with the class {0!s}\".format(handle_pol))\n packageName = \".\".join(handle_pol.split(\".\")[:-1])\n className = handle_pol.split(\".\")[-1:][0]\n mod = __import__(packageName, globals(), locals(), [className])\n pin_handler_class = getattr(mod, className)\n pin_handler = pin_handler_class()\n # Send the PIN\n pin_handler.send(request.all_data[\"pin\"],\n request.all_data.get(\"serial\", \"N/A\"),\n user_object,\n tokentype=request.all_data.get(\"type\", \"hotp\"),\n logged_in_user=g.logged_in_user)\n\n return True\n\n\ndef realmadmin(request=None, action=None):\n \"\"\"\n This decorator adds the first REALM to the parameters if the\n administrator, calling this API is a realm admin.\n This way, if the admin calls e.g. GET /user without realm parameter,\n he will not see all users, but only users in one of his realms.\n\n TODO: If a realm admin is allowed to see more than one realm,\n this is not handled at the moment. 
We need to change the underlying\n library functions!\n\n :param request: The HTTP reqeust\n :param action: The action like ACTION.USERLIST\n \"\"\"\n # This decorator is only valid for admins\n if g.logged_in_user.get(\"role\") == ROLE.ADMIN:\n params = request.all_data\n if not \"realm\" in params:\n # add the realm to params\n policy_object = g.policy_object\n po = policy_object.get_policies(\n action=action, scope=SCOPE.ADMIN,\n user=g.logged_in_user.get(\"username\"),\n adminrealm=g.logged_in_user.get(\"realm\"), client=g.client_ip,\n active=True)\n # TODO: fix this: there could be a list of policies with a list\n # of realms!\n if po and po[0].get(\"realm\"):\n request.all_data[\"realm\"] = po[0].get(\"realm\")[0]\n\n return True\n\n\ndef check_otp_pin(request=None, action=None):\n \"\"\"\n This policy function checks if the OTP PIN that is about to be set\n follows the OTP PIN policies ACTION.OTPPINMAXLEN, ACTION.OTPPINMINLEN and\n ACTION.OTPPINCONTENTS and token-type-specific PIN policy actions in the\n SCOPE.USER or SCOPE.ADMIN. It is used to decorate the API functions.\n\n The pin is investigated in the params as \"otppin\" or \"pin\"\n\n In case the given OTP PIN does not match the requirements an exception is\n raised.\n \"\"\"\n params = request.all_data\n realm = params.get(\"realm\")\n pin = params.get(\"otppin\", \"\") or params.get(\"pin\", \"\")\n serial = params.get(\"serial\")\n tokentype = params.get(\"type\")\n if not serial and action == ACTION.SETPIN:\n path_elems = request.path.split(\"/\")\n serial = path_elems[-1]\n # Also set it for later use\n request.all_data[\"serial\"] = serial\n if serial:\n # if this is a token, that does not use a pin, we ignore this check\n # And immediately return true\n tokensobject_list = get_tokens(serial=serial)\n if len(tokensobject_list) == 1:\n if tokensobject_list[0].using_pin is False:\n return True\n tokentype = tokensobject_list[0].token.tokentype\n # the default tokentype is still HOTP\n tokentype = tokentype or \"hotp\"\n policy_object = g.policy_object\n role = g.logged_in_user.get(\"role\")\n username = g.logged_in_user.get(\"username\")\n if role == ROLE.ADMIN:\n scope = SCOPE.ADMIN\n admin_realm = g.logged_in_user.get(\"realm\")\n realm = params.get(\"realm\", \"\")\n else:\n scope = SCOPE.USER\n realm = g.logged_in_user.get(\"realm\")\n admin_realm = None\n # get the policies for minimum length, maximum length and PIN contents\n # first try to get a token specific policy - otherwise fall back to\n # default policy\n pol_minlen = policy_object.get_action_values(\n action=\"{0!s}_{1!s}\".format(tokentype, ACTION.OTPPINMINLEN),\n scope=scope, user=username, realm=realm, adminrealm=admin_realm,\n client=g.client_ip, unique=True) or \\\n policy_object.get_action_values(\n action=ACTION.OTPPINMINLEN, scope=scope, user=username,\n realm=realm, adminrealm=admin_realm, client=g.client_ip,\n unique=True)\n\n pol_maxlen = policy_object.get_action_values(\n action=\"{0!s}_{1!s}\".format(tokentype, ACTION.OTPPINMAXLEN),\n scope=scope, user=username, realm=realm, adminrealm=admin_realm,\n client=g.client_ip, unique=True) or \\\n policy_object.get_action_values(\n action=ACTION.OTPPINMAXLEN, scope=scope, user=username,\n realm=realm, adminrealm=admin_realm, client=g.client_ip,\n unique=True)\n\n pol_contents = policy_object.get_action_values(\n action=\"{0!s}_{1!s}\".format(tokentype, ACTION.OTPPINCONTENTS),\n scope=scope, user=username, realm=realm, adminrealm=admin_realm,\n client=g.client_ip, unique=True) or \\\n 
policy_object.get_action_values(\n action=ACTION.OTPPINCONTENTS, scope=scope,\n user=username, realm=realm, adminrealm=admin_realm,\n client=g.client_ip, unique=True)\n\n if len(pol_minlen) == 1 and len(pin) < int(pol_minlen[0]):\n # check the minimum length requirement\n raise PolicyError(\"The minimum OTP PIN length is {0!s}\".format(\n pol_minlen[0]))\n\n if len(pol_maxlen) == 1 and len(pin) > int(pol_maxlen[0]):\n # check the maximum length requirement\n raise PolicyError(\"The maximum OTP PIN length is {0!s}\".format(\n pol_maxlen[0]))\n\n if len(pol_contents) == 1:\n # check the contents requirement\n chars = \"[a-zA-Z]\" # c\n digits = \"[0-9]\" # n\n special = \"[.:,;_<>+*!/()=?$§%&#~\\^-]\" # s\n no_others = False\n grouping = False\n\n if pol_contents[0] == \"-\":\n no_others = True\n pol_contents = pol_contents[1:]\n elif pol_contents[0] == \"+\":\n grouping = True\n pol_contents = pol_contents[1:]\n # TODO implement grouping and substraction\n if \"c\" in pol_contents[0] and not re.search(chars, pin):\n raise PolicyError(\"Missing character in PIN: {0!s}\".format(chars))\n if \"n\" in pol_contents[0] and not re.search(digits, pin):\n raise PolicyError(\"Missing character in PIN: {0!s}\".format(digits))\n if \"s\" in pol_contents[0] and not re.search(special, pin):\n raise PolicyError(\"Missing character in PIN: {0!s}\".format(special))\n\n return True\n\n\ndef papertoken_count(request=None, action=None):\n \"\"\"\n This is a token specific wrapper for paper token for the endpoint\n /token/init.\n According to the policy scope=SCOPE.ENROLL,\n action=PAPERACTION.PAPER_COUNT it sets the parameter papertoken_count to\n enroll a paper token with such many OTP values.\n\n :param request:\n :param action:\n :return:\n \"\"\"\n from privacyidea.lib.tokens.papertoken import PAPERACTION\n user_object = request.User\n policy_object = g.policy_object\n pols = policy_object.get_action_values(\n action=PAPERACTION.PAPERTOKEN_COUNT,\n scope=SCOPE.ENROLL,\n user=user_object.login,\n resolver=user_object.resolver,\n realm=user_object.realm,\n client=g.client_ip,\n unique=True)\n\n if pols:\n papertoken_count = pols[0]\n request.all_data[\"papertoken_count\"] = papertoken_count\n\n return True\n\n\ndef tantoken_count(request=None, action=None):\n \"\"\"\n This is a token specific wrapper for tan token for the endpoint\n /token/init.\n According to the policy scope=SCOPE.ENROLL,\n action=TANACTION.TANTOKEN_COUNT it sets the parameter tantoken_count to\n enroll a tan token with such many OTP values.\n\n :param request:\n :param action:\n :return:\n \"\"\"\n from privacyidea.lib.tokens.tantoken import TANACTION\n user_object = request.User\n policy_object = g.policy_object\n pols = policy_object.get_action_values(\n action=TANACTION.TANTOKEN_COUNT,\n scope=SCOPE.ENROLL,\n user=user_object.login,\n resolver=user_object.resolver,\n realm=user_object.realm,\n client=g.client_ip,\n unique=True)\n\n if pols:\n tantoken_count = pols[0]\n request.all_data[\"tantoken_count\"] = tantoken_count\n\n return True\n\n\ndef encrypt_pin(request=None, action=None):\n \"\"\"\n This policy function is to be used as a decorator for several API functions.\n E.g. 
token/assign, token/setpin, token/init\n If the policy is set to define the PIN to be encrypted,\n the request.all_data is modified like this:\n encryptpin = True\n\n It uses the policy SCOPE.ENROLL, ACTION.ENCRYPTPIN\n \"\"\"\n params = request.all_data\n policy_object = g.policy_object\n user_object = get_user_from_param(params)\n # get the length of the random PIN from the policies\n pin_pols = policy_object.get_policies(action=ACTION.ENCRYPTPIN,\n scope=SCOPE.ENROLL,\n user=user_object.login,\n realm=user_object.realm,\n client=g.client_ip,\n active=True)\n\n if pin_pols:\n request.all_data[\"encryptpin\"] = \"True\"\n else:\n if \"encryptpin\" in request.all_data:\n del request.all_data[\"encryptpin\"]\n\n return True\n\n\ndef enroll_pin(request=None, action=None):\n \"\"\"\n This policy function is used as decorator for init token.\n It checks, if the user or the admin is allowed to set a token PIN during\n enrollment. If not, it deleted the PIN from the request.\n \"\"\"\n policy_object = g.policy_object\n role = g.logged_in_user.get(\"role\")\n if role == ROLE.USER:\n scope = SCOPE.USER\n username = g.logged_in_user.get(\"username\")\n realm = g.logged_in_user.get(\"realm\")\n adminrealm = None\n else:\n scope = SCOPE.ADMIN\n username = g.logged_in_user.get(\"username\")\n realm = getParam(request.all_data, \"realm\")\n adminrealm = g.logged_in_user.get(\"realm\")\n pin_pols = policy_object.get_policies(action=ACTION.ENROLLPIN,\n scope=scope,\n user=username,\n realm=realm,\n adminrealm=adminrealm,\n client=g.client_ip,\n active=True)\n action_at_all = policy_object.get_policies(scope=scope,\n active=True,\n all_times=True)\n\n if action_at_all and not pin_pols:\n # Not allowed to set a PIN during enrollment!\n if \"pin\" in request.all_data:\n del request.all_data[\"pin\"]\n return True\n\n\ndef init_token_defaults(request=None, action=None):\n \"\"\"\n This policy function is used as a decorator for the API init function.\n Depending on policy settings it can add token specific default values\n like totp_hashlib, hotp_hashlib, totp_otplen...\n \"\"\"\n params = request.all_data\n ttype = params.get(\"type\") or \"hotp\"\n token_class = get_token_class(ttype)\n default_settings = token_class.get_default_settings(params,\n g.logged_in_user,\n g.policy_object,\n g.client_ip)\n log.debug(\"Adding default settings {0!s} for token type {1!s}\".format(\n default_settings, ttype))\n request.all_data.update(default_settings)\n return True\n\n\ndef init_tokenlabel(request=None, action=None):\n \"\"\"\n This policy function is to be used as a decorator in the API init function.\n It adds the tokenlabel definition to the params like this:\n params : { \"tokenlabel\": \"<u>@<r>\" }\n\n In addtion it adds the tokenissuer to the params like this:\n params : { \"tokenissuer\": \"privacyIDEA instance\" }\n\n It uses the policy SCOPE.ENROLL, ACTION.TOKENLABEL and ACTION.TOKENISSUER\n to set the tokenlabel and tokenissuer\n of Smartphone tokens during enrollment and this fill the details of the\n response.\n \"\"\"\n params = request.all_data\n policy_object = g.policy_object\n user_object = get_user_from_param(params)\n # get the serials from a policy definition\n label_pols = policy_object.get_action_values(action=ACTION.TOKENLABEL,\n scope=SCOPE.ENROLL,\n user=user_object.login,\n realm=user_object.realm,\n client=g.client_ip,\n unique=True,\n allow_white_space_in_action=True)\n\n if len(label_pols) == 1:\n # The policy was set, so we need to set the tokenlabel in the request.\n 
request.all_data[\"tokenlabel\"] = label_pols[0]\n\n issuer_pols = policy_object.get_action_values(action=ACTION.TOKENISSUER,\n scope=SCOPE.ENROLL,\n user=user_object.login,\n realm=user_object.realm,\n client=g.client_ip,\n unique=True,\n allow_white_space_in_action=True)\n if len(issuer_pols) == 1:\n request.all_data[\"tokenissuer\"] = issuer_pols[0]\n\n return True\n\n\ndef twostep_enrollment_activation(request=None, action=None):\n \"\"\"\n This policy function enables the two-step enrollment process according\n to the configured policies.\n It is used to decorate the ``/token/init`` endpoint.\n\n If a ``<type>_2step`` policy matches, the ``2stepinit`` parameter is handled according to the policy.\n If no policy matches, the ``2stepinit`` parameter is removed from the request data.\n \"\"\"\n policy_object = g.policy_object\n user_object = get_user_from_param(request.all_data)\n serial = getParam(request.all_data, \"serial\", optional)\n token_type = getParam(request.all_data, \"type\", optional, \"hotp\")\n token_exists = False\n if serial:\n tokensobject_list = get_tokens(serial=serial)\n if len(tokensobject_list) == 1:\n token_type = tokensobject_list[0].token.tokentype\n token_exists = True\n token_type = token_type.lower()\n role = g.logged_in_user.get(\"role\")\n # Differentiate between an admin enrolling a token for the\n # user and a user self-enrolling a token.\n if role == ROLE.ADMIN:\n scope = SCOPE.ADMIN\n adminrealm = g.logged_in_user.get(\"realm\")\n else:\n scope = SCOPE.USER\n adminrealm = None\n realm = user_object.realm\n # In any case, the policy's user attribute is matched against the\n # currently logged-in user (which may be the admin or the\n # self-enrolling user).\n user = g.logged_in_user.get(\"username\")\n # Tokentypes have separate twostep actions\n action = \"{}_2step\".format(token_type)\n twostep_enabled_pols = policy_object.get_action_values(action=action,\n scope=scope,\n unique=True,\n user=user,\n realm=realm,\n client=g.client_ip,\n adminrealm=adminrealm)\n if twostep_enabled_pols:\n enabled_setting = twostep_enabled_pols[0]\n if enabled_setting == \"allow\":\n # The user is allowed to pass 2stepinit=1\n pass\n elif enabled_setting == \"force\":\n # We force 2stepinit to be 1 (if the token does not exist yet)\n if not token_exists:\n request.all_data[\"2stepinit\"] = 1\n else:\n raise PolicyError(\"Unknown 2step policy setting: {}\".format(enabled_setting))\n else:\n # If no policy matches, the user is not allowed\n # to pass 2stepinit\n # Force two-step initialization to be None\n if \"2stepinit\" in request.all_data:\n del request.all_data[\"2stepinit\"]\n return True\n\n\ndef twostep_enrollment_parameters(request=None, action=None):\n \"\"\"\n If the ``2stepinit`` parameter is set to true, this policy function\n reads additional configuration from policies and adds it\n to ``request.all_data``, that is:\n\n * ``{type}_2step_serversize`` is written to ``2step_serversize``\n * ``{type}_2step_clientsize`` is written to ``2step_clientsize`\n * ``{type}_2step_difficulty`` is written to ``2step_difficulty``\n\n If no policy matches, the value passed by the user is kept.\n\n This policy function is used to decorate the ``/token/init`` endpoint.\n \"\"\"\n policy_object = g.policy_object\n user_object = get_user_from_param(request.all_data)\n serial = getParam(request.all_data, \"serial\", optional)\n token_type = getParam(request.all_data, \"type\", optional, \"hotp\")\n if serial:\n tokensobject_list = get_tokens(serial=serial)\n if 
len(tokensobject_list) == 1:\n token_type = tokensobject_list[0].token.tokentype\n token_type = token_type.lower()\n role = g.logged_in_user.get(\"role\")\n # Differentiate between an admin enrolling a token for the\n # user and a user self-enrolling a token.\n if role == ROLE.ADMIN:\n adminrealm = g.logged_in_user.get(\"realm\")\n else:\n adminrealm = None\n realm = user_object.realm\n # In any case, the policy's user attribute is matched against the\n # currently logged-in user (which may be the admin or the\n # self-enrolling user).\n user = g.logged_in_user.get(\"username\")\n # Tokentypes have separate twostep actions\n if is_true(getParam(request.all_data, \"2stepinit\", optional)):\n parameters = (\"2step_serversize\", \"2step_clientsize\", \"2step_difficulty\")\n for parameter in parameters:\n action = u\"{}_{}\".format(token_type, parameter)\n action_values = policy_object.get_action_values(action=action,\n scope=SCOPE.ENROLL,\n unique=True,\n user=user,\n realm=realm,\n client=g.client_ip,\n adminrealm=adminrealm)\n if action_values:\n request.all_data[parameter] = action_values[0]\n\n\ndef check_max_token_user(request=None, action=None):\n \"\"\"\n Pre Policy\n This checks the maximum token per user policy.\n Check ACTION.MAXTOKENUSER\n\n This decorator can wrap:\n /token/init (with a realm and user)\n /token/assign\n\n :param req:\n :param action:\n :return: True otherwise raises an Exception\n \"\"\"\n ERROR = \"The number of tokens for this user is limited!\"\n params = request.all_data\n user_object = get_user_from_param(params)\n serial = getParam(params, \"serial\")\n if user_object.login:\n policy_object = g.policy_object\n limit_list = policy_object.get_action_values(ACTION.MAXTOKENUSER,\n scope=SCOPE.ENROLL,\n realm=user_object.realm,\n user=user_object.login,\n client=g.client_ip)\n if limit_list:\n # we need to check how many tokens the user already has assigned!\n tokenobject_list = get_tokens(user=user_object)\n if serial and serial in [tok.token.serial for tok in tokenobject_list]:\n # If a serial is provided and this token already exists, the\n # token can be regenerated\n return True\n already_assigned_tokens = len(tokenobject_list)\n if already_assigned_tokens >= max([int(x) for x in limit_list]):\n raise PolicyError(ERROR)\n return True\n\n\ndef check_max_token_realm(request=None, action=None):\n \"\"\"\n Pre Policy\n This checks the maximum token per realm.\n Check ACTION.MAXTOKENREALM\n\n This decorator can wrap:\n /token/init (with a realm and user)\n /token/assign\n /token/tokenrealms\n\n :param req: The request that is intercepted during the API call\n :type req: Request Object\n :param action: An optional Action\n :type action: basestring\n :return: True otherwise raises an Exception\n \"\"\"\n ERROR = \"The number of tokens in this realm is limited!\"\n params = request.all_data\n user_object = get_user_from_param(params)\n if user_object:\n realm = user_object.realm\n else: # pragma: no cover\n realm = params.get(\"realm\")\n\n if realm:\n policy_object = g.policy_object\n limit_list = policy_object.get_action_values(ACTION.MAXTOKENREALM,\n scope=SCOPE.ENROLL,\n realm=realm,\n client=g.client_ip)\n if limit_list:\n # we need to check how many tokens the realm already has assigned!\n tokenobject_list = get_tokens(realm=realm)\n already_assigned_tokens = len(tokenobject_list)\n if already_assigned_tokens >= max([int(x) for x in limit_list]):\n raise PolicyError(ERROR)\n return True\n\n\ndef set_realm(request=None, action=None):\n \"\"\"\n Pre Policy\n 
This pre condition gets the current realm and verifies if the realm\n should be rewritten due to the policy definition.\n I takes the realm from the request and - if a policy matches - replaces\n this realm with the realm defined in the policy\n\n Check ACTION.SETREALM\n\n This decorator should wrap\n /validate/check\n\n :param request: The request that is intercepted during the API call\n :type request: Request Object\n :param action: An optional Action\n :type action: basestring\n :returns: Always true. Modified the parameter request\n \"\"\"\n #user_object = get_user_from_param(request.all_data)\n user_object = request.User\n # At the moment a realm parameter with no user parameter returns a user\n # object like \"@realm\". If this is changed one day, we need to also fetch\n # the realm\n if user_object:\n realm = user_object.realm\n username = user_object.login\n else: # pragma: no cover\n realm = request.all_data.get(\"realm\")\n username = None\n\n policy_object = g.policy_object\n new_realm = policy_object.get_action_values(ACTION.SETREALM,\n scope=SCOPE.AUTHZ,\n user=username,\n realm=realm,\n client=g.client_ip)\n if len(new_realm) > 1:\n raise PolicyError(\"I do not know, to which realm I should set the \"\n \"new realm. Conflicting policies exist.\")\n elif len(new_realm) == 1:\n # There is one specific realm, which we set in the request\n request.all_data[\"realm\"] = new_realm[0]\n\n return True\n\n\ndef required_email(request=None, action=None):\n \"\"\"\n This precondition checks if the \"email\" parameter matches the regular\n expression in the policy scope=register, action=requiredemail.\n See :ref:`policy_requiredemail`.\n\n Check ACTION.REQUIREDEMAIL\n\n This decorator should wrap POST /register\n\n :param request: The Request Object\n :param action: An optional Action\n :return: Modifies the request parameters or raises an Exception\n \"\"\"\n email = getParam(request.all_data, \"email\")\n email_found = False\n email_pols = g.policy_object.\\\n get_action_values(ACTION.REQUIREDEMAIL, scope=SCOPE.REGISTER,\n client=g.client_ip)\n if email and email_pols:\n for email_pol in email_pols:\n # The policy is only \"/regularexpr/\".\n search = email_pol.strip(\"/\")\n if re.findall(search, email):\n email_found = True\n if not email_found:\n raise RegistrationError(\"This email address is not allowed to \"\n \"register!\")\n\n return True\n\n\ndef auditlog_age(request=None, action=None):\n \"\"\"\n This pre condition checks for the policy auditlog_age and set the\n \"timelimit\" parameter of the audit search API.\n\n Check ACTION.AUDIT_AGE\n\n The decorator can wrap GET /audit/\n\n :param request: The request that is intercepted during the API call\n :type request: Request Object\n :param action: An optional Action\n :type action: basestring\n :returns: Always true. 
Modified the parameter request\n \"\"\"\n user_object = request.User\n policy_object = g.policy_object\n role = g.logged_in_user.get(\"role\")\n if role == ROLE.ADMIN:\n scope = SCOPE.ADMIN\n adminrealm = g.logged_in_user.get(\"realm\")\n user = g.logged_in_user.get(\"username\")\n realm = user_object.realm\n else:\n scope = SCOPE.USER\n adminrealm = None\n user = user_object.login\n realm = user_object.realm\n\n audit_age = policy_object.get_action_values(ACTION.AUDIT_AGE,\n scope=scope,\n adminrealm=adminrealm,\n realm=realm,\n user=user,\n client=g.client_ip,\n unique=True)\n timelimit = None\n timelimit_s = None\n for aa in audit_age:\n if not timelimit:\n timelimit_s = aa\n timelimit = parse_timedelta(timelimit_s)\n else:\n # We will use the longest allowed timelimit\n if parse_timedelta(aa) > timelimit:\n timelimit_s = aa\n timelimit = parse_timedelta(timelimit_s)\n\n log.debug(\"auditlog_age: {0!s}\".format(timelimit_s))\n request.all_data[\"timelimit\"] = timelimit_s\n\n return True\n\n\ndef mangle(request=None, action=None):\n \"\"\"\n This pre condition checks if either of the parameters pass, user or realm\n in a validate/check request should be rewritten based on an\n authentication policy with action \"mangle\".\n See :ref:`policy_mangle` for an example.\n\n Check ACTION.MANGLE\n\n This decorator should wrap\n /validate/check\n\n :param request: The request that is intercepted during the API call\n :type request: Request Object\n :param action: An optional Action\n :type action: basestring\n :returns: Always true. Modified the parameter request\n \"\"\"\n user_object = request.User\n\n policy_object = g.policy_object\n mangle_pols = policy_object.get_action_values(ACTION.MANGLE,\n scope=SCOPE.AUTH,\n realm=user_object.realm,\n user=user_object.login,\n client=g.client_ip)\n # We can have several mangle policies! One for user, one for realm and\n # one for pass. So we do no checking here.\n for mangle_pol_action in mangle_pols:\n # mangle_pol_action looks like this:\n # keyword/search/replace/. 
Where \"keyword\" can be \"user\", \"pass\" or\n # \"realm\".\n mangle_key, search, replace, _rest = mangle_pol_action.split(\"/\", 3)\n mangle_value = request.all_data.get(mangle_key)\n if mangle_value:\n log.debug(\"mangling authentication data: {0!s}\".format(mangle_key))\n request.all_data[mangle_key] = re.sub(search, replace,\n mangle_value)\n if mangle_key in [\"user\", \"realm\"]:\n request.User = get_user_from_param(request.all_data)\n return True\n\n\ndef check_anonymous_user(request=None, action=None):\n \"\"\"\n This decorator function takes the request and verifies the given action\n for the SCOPE USER without an authenticated user but the user from the\n parameters.\n\n This is used with password_reset\n\n :param request:\n :param action:\n :return: True otherwise raises an Exception\n \"\"\"\n ERROR = \"User actions are defined, but this action is not allowed!\"\n params = request.all_data\n policy_object = g.policy_object\n scope = SCOPE.USER\n user_obj = get_user_from_param(params)\n username = user_obj.login\n realm = user_obj.realm\n\n action = policy_object.get_policies(action=action,\n user=username,\n realm=realm,\n scope=scope,\n client=g.client_ip,\n adminrealm=None,\n active=True)\n action_at_all = policy_object.get_policies(scope=scope,\n active=True,\n all_times=True)\n if action_at_all and len(action) == 0:\n raise PolicyError(ERROR)\n return True\n\n\ndef check_base_action(request=None, action=None, anonymous=False):\n \"\"\"\n This decorator function takes the request and verifies the given action\n for the SCOPE ADMIN or USER.\n :param request:\n :param action:\n :param anonymous: If set to True, the user data is taken from the request\n parameters.\n :return: True otherwise raises an Exception\n \"\"\"\n ERROR = {\"user\": \"User actions are defined, but the action %s is not \"\n \"allowed!\" % action,\n \"admin\": \"Admin actions are defined, but the action %s is not \"\n \"allowed!\" % action}\n params = request.all_data\n policy_object = g.policy_object\n username = g.logged_in_user.get(\"username\")\n role = g.logged_in_user.get(\"role\")\n scope = SCOPE.ADMIN\n admin_realm = g.logged_in_user.get(\"realm\")\n realm = None\n resolver = None\n\n if role == ROLE.USER:\n scope = SCOPE.USER\n # Reset the admin realm\n admin_realm = None\n realm = realm or g.logged_in_user.get(\"realm\")\n\n # In certain cases we can not resolve the user by the serial!\n if action not in [ACTION.AUDIT]:\n realm = params.get(\"realm\")\n if type(realm) == list and len(realm) == 1:\n realm = realm[0]\n resolver = params.get(\"resolver\")\n # get the realm by the serial:\n if not realm and params.get(\"serial\"):\n realm = get_realms_of_token(params.get(\"serial\"),\n only_first_realm=True)\n\n # get the realm by the serial, while the serial is part of the URL like\n # DELETE /token/serial\n if not realm and request.view_args and request.view_args.get(\"serial\"):\n realm = get_realms_of_token(request.view_args.get(\"serial\"),\n only_first_realm=True)\n\n action = policy_object.get_policies(action=action,\n user=username,\n realm=realm,\n scope=scope,\n resolver=resolver,\n client=g.client_ip,\n adminrealm=admin_realm,\n active=True)\n action_at_all = policy_object.get_policies(scope=scope,\n active=True,\n all_times=True)\n if action_at_all and len(action) == 0:\n raise PolicyError(ERROR.get(role))\n return True\n\n\ndef check_token_upload(request=None, action=None):\n \"\"\"\n This decorator function takes the request and verifies the given action\n for scope ADMIN\n :param 
req:\n :param filename:\n :return:\n \"\"\"\n params = request.all_data\n policy_object = g.policy_object\n username = g.logged_in_user.get(\"username\")\n admin_realm = g.logged_in_user.get(\"realm\")\n action = policy_object.get_policies(action=ACTION.IMPORT,\n user=username,\n realm=params.get(\"realm\"),\n scope=SCOPE.ADMIN,\n client=g.client_ip,\n adminrealm=admin_realm,\n active=True)\n action_at_all = policy_object.get_policies(scope=SCOPE.ADMIN,\n active=True, all_times=True)\n if action_at_all and len(action) == 0:\n raise PolicyError(\"Admin actions are defined, but you are not allowed\"\n \" to upload token files.\")\n return True\n\n\ndef check_token_init(request=None, action=None):\n \"\"\"\n This decorator function takes the request and verifies\n if the requested tokentype is allowed to be enrolled in the SCOPE ADMIN\n or the SCOPE USER.\n :param request:\n :param action:\n :return: True or an Exception is raised\n \"\"\"\n ERROR = {\"user\": \"User actions are defined, you are not allowed to \"\n \"enroll this token type!\",\n \"admin\": \"Admin actions are defined, but you are not allowed to \"\n \"enroll this token type!\"}\n params = request.all_data\n policy_object = g.policy_object\n username = g.logged_in_user.get(\"username\")\n role = g.logged_in_user.get(\"role\")\n admin_realm = g.logged_in_user.get(\"realm\")\n scope = SCOPE.ADMIN\n if role == ROLE.USER:\n scope = SCOPE.USER\n admin_realm = None\n tokentype = params.get(\"type\", \"HOTP\")\n action = \"enroll{0!s}\".format(tokentype.upper())\n action = policy_object.get_policies(action=action,\n user=username,\n realm=params.get(\"realm\"),\n scope=scope,\n client=g.client_ip,\n adminrealm=admin_realm,\n active=True)\n action_at_all = policy_object.get_policies(scope=scope, active=True,\n all_times=True)\n if action_at_all and len(action) == 0:\n raise PolicyError(ERROR.get(role))\n return True\n\n\ndef check_external(request=None, action=\"init\"):\n \"\"\"\n This decorator is a hook to an external check function, that is called\n before the token/init or token/assign API.\n\n :param request: The REST request\n :type request: flask Request object\n :param action: This is either \"init\" or \"assign\"\n :type action: basestring\n :return: either True or an Exception is raised\n \"\"\"\n function_name = None\n module = None\n try:\n module_func = current_app.config.get(\"PI_INIT_CHECK_HOOK\")\n if module_func:\n module_name = \".\".join(module_func.split(\".\")[:-1])\n module = importlib.import_module(module_name)\n function_name = module_func.split(\".\")[-1]\n except Exception as exx:\n log.error(\"Error importing external check function: {0!s}\".format(exx))\n\n # Import of function was successful\n if function_name:\n external_func = getattr(module, function_name)\n external_func(request, action)\n return True\n\n\ndef api_key_required(request=None, action=None):\n \"\"\"\n This is a decorator for check_user_pass and check_serial_pass.\n It checks, if a policy scope=auth, action=apikeyrequired is set.\n If so, the validate request will only performed, if a JWT token is passed\n with role=validate.\n \"\"\"\n ERROR = \"The policy requires an API key to authenticate, \" \\\n \"but no key was passed.\"\n params = request.all_data\n policy_object = g.policy_object\n #user_object = get_user_from_param(params)\n user_object = request.User\n\n # Get the policies\n action = policy_object.get_policies(action=ACTION.APIKEY,\n user=user_object.login,\n realm=user_object.realm,\n scope=SCOPE.AUTHZ,\n client=g.client_ip,\n 
active=True)\n # Do we have a policy?\n if action:\n # check if we were passed a correct JWT\n # Get the Authorization token from the header\n auth_token = request.headers.get('PI-Authorization')\n if not auth_token:\n auth_token = request.headers.get('Authorization')\n try:\n r = jwt.decode(auth_token, current_app.secret_key)\n g.logged_in_user = {\"username\": r.get(\"username\", \"\"),\n \"realm\": r.get(\"realm\", \"\"),\n \"role\": r.get(\"role\", \"\")}\n except (AttributeError, jwt.DecodeError):\n # PyJWT 1.3.0 raises AttributeError, PyJWT 1.6.4 raises DecodeError.\n raise PolicyError(\"No valid API key was passed.\")\n\n role = g.logged_in_user.get(\"role\")\n if role != ROLE.VALIDATE:\n raise PolicyError(\"A correct JWT was passed, but it was no API \"\n \"key.\")\n\n # If everything went fine, we call the original function\n return True\n\n\ndef mock_success(req, action):\n \"\"\"\n This is a mock function as an example for check_external. This function\n returns success and the API call will go on unmodified.\n \"\"\"\n return True\n\n\ndef mock_fail(req, action):\n \"\"\"\n This is a mock function as an example for check_external. This function\n creates a problem situation and the token/init or token/assign will show\n this exception accordingly.\n \"\"\"\n raise Exception(\"This is an Exception in an external check function\")\n\n\ndef is_remote_user_allowed(req):\n \"\"\"\n Checks if the REMOTE_USER server variable is allowed to be used.\n\n .. note:: This is not used as a decorator!\n\n :param req: The flask request, containing the remote user and the client IP\n :return:\n \"\"\"\n res = False\n if req.remote_user:\n loginname, realm = split_user(req.remote_user)\n realm = realm or get_default_realm()\n\n # Check if the remote user is allowed\n if \"client_ip\" not in g:\n g.client_ip = get_client_ip(req,\n get_from_config(SYSCONF.OVERRIDECLIENT))\n if \"policy_object\" not in g:\n g.policy_object = PolicyClass()\n ruser_active = g.policy_object.get_action_values(ACTION.REMOTE_USER,\n scope=SCOPE.WEBUI,\n user=loginname,\n realm=realm,\n client=g.client_ip)\n\n res = ruser_active\n\n return res\n\n\ndef save_client_application_type(request, action):\n \"\"\"\n This decorator is used to write the client IP and the HTTP user agent (\n clienttype) to the database.\n\n In fact this is not a **policy** decorator, as it checks no policy. In\n fact, we could however one day\n define this as a policy, too.\n :param req:\n :return:\n \"\"\"\n # retrieve the IP. 
This will also be the mapped IP!\n client_ip = g.client_ip or \"0.0.0.0\"\n # ...and the user agent.\n ua = request.user_agent\n save_clientapplication(client_ip, \"{0!s}\".format(ua) or \"unknown\")\n return True\n\n\ndef u2ftoken_verify_cert(request, action):\n \"\"\"\n This is a token specific wrapper for u2f token for the endpoint\n /token/init\n According to the policy scope=SCOPE.ENROLL,\n action=U2FACTION.NO_VERIFY_CERT it can add a parameter to the\n enrollment parameters to not verify the attestation certificate.\n The default is to verify the cert.\n :param request:\n :param action:\n :return:\n \"\"\"\n # Get the registration data of the 2nd step of enrolling a U2F device\n ttype = request.all_data.get(\"type\")\n if ttype and ttype.lower() == \"u2f\":\n policy_object = g.policy_object\n # Add the default to verify the cert.\n request.all_data[\"u2f.verify_cert\"] = True\n user_object = request.User\n\n if user_object:\n token_user = user_object.login\n token_realm = user_object.realm\n token_resolver = user_object.resolver\n else:\n token_realm = token_resolver = token_user = None\n\n do_not_verify_the_cert = policy_object.get_policies(\n action=U2FACTION.NO_VERIFY_CERT,\n scope=SCOPE.ENROLL,\n realm=token_realm,\n user=token_user,\n resolver=token_resolver,\n active=True,\n client=g.client_ip)\n if do_not_verify_the_cert:\n request.all_data[\"u2f.verify_cert\"] = False\n\n log.debug(\"Should we not verify the attestation certificate? \"\n \"Policies: {0!s}\".format(do_not_verify_the_cert))\n return True\n\n\ndef u2ftoken_allowed(request, action):\n \"\"\"\n This is a token specific wrapper for u2f token for the endpoint\n /token/init.\n According to the policy scope=SCOPE.ENROLL,\n action=U2FACTION.REQ it checks, if the assertion certificate is an\n allowed U2F token type.\n\n If the token, which is enrolled contains a non allowed attestation \n certificate, we bail out.\n\n :param request: \n :param action: \n :return: \n \"\"\"\n policy_object = g.policy_object\n # Get the registration data of the 2nd step of enrolling a U2F device\n reg_data = request.all_data.get(\"regdata\")\n if reg_data:\n # We have a registered u2f device!\n serial = request.all_data.get(\"serial\")\n user_object = request.User\n\n # We just check, if the issuer is allowed, not if the certificate\n # is still valid! 
(verify_cert=False)\n attestation_cert, user_pub_key, key_handle, \\\n signature, description = parse_registration_data(reg_data,\n verify_cert=False)\n\n cert_info = {\n \"attestation_issuer\":\n x509name_to_string(attestation_cert.get_issuer()),\n \"attestation_serial\": \"{!s}\".format(\n attestation_cert.get_serial_number()),\n \"attestation_subject\": x509name_to_string(\n attestation_cert.get_subject())}\n\n if user_object:\n token_user = user_object.login\n token_realm = user_object.realm\n token_resolver = user_object.resolver\n else:\n token_realm = token_resolver = token_user = None\n\n allowed_certs_pols = policy_object.get_action_values(\n U2FACTION.REQ,\n scope=SCOPE.ENROLL,\n realm=token_realm,\n user=token_user,\n resolver=token_resolver,\n client=g.client_ip)\n for allowed_cert in allowed_certs_pols:\n tag, matching, _rest = allowed_cert.split(\"/\", 3)\n tag_value = cert_info.get(\"attestation_{0!s}\".format(tag))\n # if we do not get a match, we bail out\n m = re.search(matching, tag_value)\n if not m:\n log.warning(\"The U2F device {0!s} is not \"\n \"allowed to be registered due to policy \"\n \"restriction\".format(\n serial))\n raise PolicyError(\"The U2F device is not allowed \"\n \"to be registered due to policy \"\n \"restriction.\")\n # TODO: Maybe we should delete the token, as it is a not\n # usable U2F token, now.\n\n return True\n\n\ndef allowed_audit_realm(request=None, action=None):\n \"\"\"\n This decorator function takes the request and adds additional parameters \n to the request according to the policy\n for the SCOPE.ADMIN or ACTION.AUDIT\n :param request:\n :param action:\n :return: True\n \"\"\"\n admin_user = g.logged_in_user\n policy_object = g.policy_object\n pols = policy_object.get_policies(\n action=ACTION.AUDIT,\n scope=SCOPE.ADMIN,\n user=admin_user.get(\"username\"),\n adminrealm=admin_user.get(\"realm\"),\n client=g.client_ip,\n active=True)\n\n if pols:\n # get all values in realm:\n allowed_audit_realms = []\n for pol in pols:\n if pol.get(\"realm\"):\n allowed_audit_realms += pol.get(\"realm\")\n request.all_data[\"allowed_audit_realm\"] = list(set(\n allowed_audit_realms))\n\n return True\n\n\n",
"path": "privacyidea/api/lib/prepolicy.py"
}
] | diff --git a/privacyidea/api/lib/prepolicy.py b/privacyidea/api/lib/prepolicy.py
index 59d6cb8991..2067f84c8c 100644
--- a/privacyidea/api/lib/prepolicy.py
+++ b/privacyidea/api/lib/prepolicy.py
@@ -1301,6 +1301,7 @@ def allowed_audit_realm(request=None, action=None):
action=ACTION.AUDIT,
scope=SCOPE.ADMIN,
user=admin_user.get("username"),
+ adminrealm=admin_user.get("realm"),
client=g.client_ip,
active=True)
diff --git a/tests/test_api_audit.py b/tests/test_api_audit.py
index ef8bcf2125..244a3c32d9 100644
--- a/tests/test_api_audit.py
+++ b/tests/test_api_audit.py
@@ -5,6 +5,8 @@
import datetime
from dateutil.parser import parse as parse_time_string
from dateutil.tz import tzlocal
+from privacyidea.lib.resolver import save_resolver
+from privacyidea.lib.realm import set_realm
PWFILE = "tests/testdata/passwords"
POLICYFILE = "tests/testdata/policy.cfg"
@@ -68,7 +70,6 @@ def test_01_get_statistics(self):
d = parse_time_string(json_response.get("result").get("value").get("time_end"))
self.assertEqual(d, end)
-
def test_02_get_allowed_audit_realm(self):
# Check that an administrator is only allowed to see log entries of
# the defined realms.
@@ -91,17 +92,17 @@ def test_02_get_allowed_audit_realm(self):
self.assertEqual(json_response.get("result").get("value").get(
"count"), 2)
- with self.app.test_request_context('/audit/',
- method='GET',
- data={"realm": "realm2B"},
- headers={
- 'Authorization': self.at}):
- res = self.app.full_dispatch_request()
- self.assertTrue(res.status_code == 200, res)
- json_response = json.loads(res.data)
- self.assertTrue(json_response.get("result").get("status"), res)
- self.assertEqual(json_response.get("result").get("value").get(
- "count"), 3)
+ with self.app.test_request_context('/audit/',
+ method='GET',
+ data={"realm": "realm2B"},
+ headers={
+ 'Authorization': self.at}):
+ res = self.app.full_dispatch_request()
+ self.assertTrue(res.status_code == 200, res)
+ json_response = json.loads(res.data)
+ self.assertTrue(json_response.get("result").get("status"), res)
+ self.assertEqual(json_response.get("result").get("value").get(
+ "count"), 3)
# set policy for audit realms
set_policy("audit01", scope=SCOPE.ADMIN, action=ACTION.AUDIT,
@@ -122,12 +123,99 @@ def test_02_get_allowed_audit_realm(self):
# delete policy
delete_policy("audit01")
+ def test_03_get_allowed_audit_realm(self):
+ # Check than an internal admin is allowed to see all realms
+ # A helpdesk user in "adminrealm" is only allowerd to see realm1A
+ Audit(action="enroll", success=1, realm="realm1A").save()
+ Audit(action="enroll", success=1, realm="realm1A").save()
+ Audit(action="enroll", success=1, realm="realm2B").save()
+ Audit(action="enroll", success=1, realm="realm2B").save()
+ Audit(action="enroll", success=1, realm="realm2B").save()
+
+ # check, that we see all audit entries
+ with self.app.test_request_context('/audit/',
+ method='GET',
+ data={"realm": "realm1A"},
+ headers={'Authorization': self.at}):
+ res = self.app.full_dispatch_request()
+ self.assertTrue(res.status_code == 200, res)
+ json_response = json.loads(res.data)
+ self.assertTrue(json_response.get("result").get("status"), res)
+ self.assertEqual(json_response.get("result").get("value").get(
+ "count"), 5)
+
+ with self.app.test_request_context('/audit/',
+ method='GET',
+ data={"realm": "realm2B"},
+ headers={
+ 'Authorization': self.at}):
+ res = self.app.full_dispatch_request()
+ self.assertTrue(res.status_code == 200, res)
+ json_response = json.loads(res.data)
+ self.assertTrue(json_response.get("result").get("status"), res)
+ self.assertEqual(json_response.get("result").get("value").get(
+ "count"), 7)
+
+ # set policy: helpdesk users in adminrealm are only allowed to
+ # view "realm1A".
+ set_policy("audit01", scope=SCOPE.ADMIN, action=ACTION.AUDIT,
+ adminrealm="adminrealm", realm="realm1A")
+ # Test admin is allowed to view unrestricted logs!
+ set_policy("audit02", scope=SCOPE.ADMIN, action=ACTION.AUDIT,
+ user="testadmin")
+
+ rid = save_resolver({"resolver": self.resolvername1,
+ "type": "passwdresolver",
+ "fileName": PWFILE})
+ self.assertTrue(rid > 0, rid)
+
+ (added, failed) = set_realm("adminrealm",
+ [self.resolvername1])
+ self.assertTrue(len(failed) == 0)
+ self.assertTrue(len(added) == 1)
+
+ helpdesk_authorization = None
+ with self.app.test_request_context('/auth',
+ method='POST', data={'username': 'selfservice@adminrealm',
+ 'password': 'test'}):
+ res = self.app.full_dispatch_request()
+ self.assertTrue(res.status_code == 200, res)
+ json_response = json.loads(res.data)
+ value = json_response.get("result").get("value")
+ # Helpdesk user is allowed to view the audit log.
+ self.assertTrue("auditlog" in value.get("rights"))
+ helpdesk_authorization = value.get("token")
+
+ # check, that we only see allowed audit realms
+ with self.app.test_request_context('/audit/',
+ method='GET',
+ headers={'Authorization': helpdesk_authorization}):
+ res = self.app.full_dispatch_request()
+ self.assertTrue(res.status_code == 200, res)
+ json_response = json.loads(res.data)
+ self.assertTrue(json_response.get("result").get("status"), res)
+ # We now have 3 entries, as we added one by the search in line #43
+ count = json_response.get("result").get("value").get("count")
+ auditdata = json_response.get("result").get("value").get("auditdata")
+ self.assertEqual(count, 6)
+ # All entries are in realm1A!
+ for ad in auditdata:
+ self.assertEqual(ad.get("realm"), "realm1A")
+
+ # Now check, that the testadmin (self.at) see all entries!
+ with self.app.test_request_context('/audit/',
+ method='GET',
+ headers={'Authorization': self.at}):
+ res = self.app.full_dispatch_request()
+ self.assertTrue(res.status_code == 200, res)
+ json_response = json.loads(res.data)
+ self.assertTrue(json_response.get("result").get("status"), res)
+ # We now have 3 entries, as we added one by the search in line #43
+ count = json_response.get("result").get("value").get("count")
+ auditdata = json_response.get("result").get("value").get("auditdata")
+ self.assertEqual(count, 25)
+
+ # delete policy
+ delete_policy("audit01")
+ delete_policy("audit02")
- #def test_01_download_audit(self):
- # with self.app.test_request_context('/audit/auditfile.csv',
- # method='GET',
- # headers={'Authorization': self.at}):
- # res = self.app.full_dispatch_request()
- # self.assertTrue(res.status_code == 200, res)
- # self.assertTrue(res.mimetype == "text/csv", res.mimetype)
- # self.assertTrue(res.stream)
|
google__turbinia-785 | import TurbiniaException to partitions.py
```
Traceback (most recent call last):
File "PATH/v2/lib/python3.8/site-packages/turbinia/workers/__init__.py", line 916, in run_wrapper
self.result = self.run(evidence, self.result)
File "PATH/v2/lib/python3.8/site-packages/turbinia/workers/partitions.py", line 144, in run
path_specs = partitions.Enumerate(evidence)
File "/PATH/v2/lib/python3.8/site-packages/turbinia/processors/partitions.py", line 49, in Enumerate
raise TurbiniaException(
NameError: name 'TurbiniaException' is not defined
2021-03-05 18:45:56 [ERROR] PartitionEnumerationTask Task failed with exception: [name 'TurbiniaException' is not defined]
```
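The traceback shows a plain missing import: `Enumerate()` in `turbinia/processors/partitions.py` raises `TurbiniaException`, but the module never imports that name. The one-line fix, matching the diff below, is to import it from the top-level package:

```python
# turbinia/processors/partitions.py
# TurbiniaException is exported by the top-level turbinia package; importing
# it here lets the `raise TurbiniaException(...)` in Enumerate() resolve
# instead of failing with NameError.
from turbinia import TurbiniaException
```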
| [
{
"content": "# -*- coding: utf-8 -*-\n# Copyright 2021 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Evidence processor to enumerate partitions.\"\"\"\n\nimport logging\n\nfrom dfvfs.helpers import volume_scanner\nfrom dfvfs.lib import definitions as dfvfs_definitions\nfrom dfvfs.lib import errors as dfvfs_errors\n\nfrom turbinia.lib.dfvfs_classes import UnattendedVolumeScannerMediator\n\nlog = logging.getLogger('turbinia')\n\n\ndef Enumerate(evidence):\n \"\"\"Uses dfVFS to enumerate partitions in a disk / image.\n\n Args:\n evidence: Evidence object to be scanned.\n\n Raises:\n TurbiniaException if source evidence can't be scanned.\n\n Returns:\n list[dfVFS.path_spec]: path specs for identified partitions\n \"\"\"\n dfvfs_definitions.PREFERRED_GPT_BACK_END = (\n dfvfs_definitions.TYPE_INDICATOR_GPT)\n mediator = UnattendedVolumeScannerMediator()\n mediator.credentials = evidence.credentials\n path_specs = []\n try:\n scanner = volume_scanner.VolumeScanner(mediator=mediator)\n path_specs = scanner.GetBasePathSpecs(evidence.local_path)\n except dfvfs_errors.ScannerError as e:\n raise TurbiniaException(\n 'Could not enumerate partitions [{0!s}]: {1!s}'.format(\n evidence.local_path, e))\n\n return path_specs\n\n\ndef GetPartitionEncryptionType(path_spec):\n \"\"\"Checks a partition for encryption.\n\n Args:\n path_spec (dfVFS.path_spec): Partition path_spec.\n\n Returns:\n String representing the type of encryption, or None.\n \"\"\"\n encryption_type = None\n if path_spec.parent.type_indicator == dfvfs_definitions.TYPE_INDICATOR_BDE:\n encryption_type = 'BDE'\n return encryption_type\n\n\ndef GetPathSpecByLocation(path_specs, location):\n \"\"\"Finds a path_spec from a list of path_specs for a given location.\n\n Args:\n path_specs (list[dfVFS.path_spec]): List of path_specs from volume scanner.\n location (str): dfVFS location to search for.\n\n Returns:\n dfVFS.path_spec for the given location or None if not found.\n \"\"\"\n for path_spec in path_specs:\n child_path_spec = path_spec\n fs_location = getattr(path_spec, 'location', None)\n while path_spec.HasParent():\n type_indicator = path_spec.type_indicator\n if type_indicator in (dfvfs_definitions.TYPE_INDICATOR_TSK_PARTITION,\n dfvfs_definitions.TYPE_INDICATOR_GPT):\n if fs_location in ('\\\\', '/'):\n fs_location = getattr(path_spec, 'location', None)\n break\n path_spec = path_spec.parent\n if fs_location == location:\n return child_path_spec\n return None\n",
"path": "turbinia/processors/partitions.py"
}
] | [
{
"content": "# -*- coding: utf-8 -*-\n# Copyright 2021 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Evidence processor to enumerate partitions.\"\"\"\n\nimport logging\n\nfrom dfvfs.helpers import volume_scanner\nfrom dfvfs.lib import definitions as dfvfs_definitions\nfrom dfvfs.lib import errors as dfvfs_errors\n\nfrom turbinia.lib.dfvfs_classes import UnattendedVolumeScannerMediator\nfrom turbinia import TurbiniaException\n\nlog = logging.getLogger('turbinia')\n\n\ndef Enumerate(evidence):\n \"\"\"Uses dfVFS to enumerate partitions in a disk / image.\n\n Args:\n evidence: Evidence object to be scanned.\n\n Raises:\n TurbiniaException if source evidence can't be scanned.\n\n Returns:\n list[dfVFS.path_spec]: path specs for identified partitions\n \"\"\"\n dfvfs_definitions.PREFERRED_GPT_BACK_END = (\n dfvfs_definitions.TYPE_INDICATOR_GPT)\n mediator = UnattendedVolumeScannerMediator()\n mediator.credentials = evidence.credentials\n path_specs = []\n try:\n scanner = volume_scanner.VolumeScanner(mediator=mediator)\n path_specs = scanner.GetBasePathSpecs(evidence.local_path)\n except dfvfs_errors.ScannerError as e:\n raise TurbiniaException(\n 'Could not enumerate partitions [{0!s}]: {1!s}'.format(\n evidence.local_path, e))\n\n return path_specs\n\n\ndef GetPartitionEncryptionType(path_spec):\n \"\"\"Checks a partition for encryption.\n\n Args:\n path_spec (dfVFS.path_spec): Partition path_spec.\n\n Returns:\n String representing the type of encryption, or None.\n \"\"\"\n encryption_type = None\n if path_spec.parent.type_indicator == dfvfs_definitions.TYPE_INDICATOR_BDE:\n encryption_type = 'BDE'\n return encryption_type\n\n\ndef GetPathSpecByLocation(path_specs, location):\n \"\"\"Finds a path_spec from a list of path_specs for a given location.\n\n Args:\n path_specs (list[dfVFS.path_spec]): List of path_specs from volume scanner.\n location (str): dfVFS location to search for.\n\n Returns:\n dfVFS.path_spec for the given location or None if not found.\n \"\"\"\n for path_spec in path_specs:\n child_path_spec = path_spec\n fs_location = getattr(path_spec, 'location', None)\n while path_spec.HasParent():\n type_indicator = path_spec.type_indicator\n if type_indicator in (dfvfs_definitions.TYPE_INDICATOR_TSK_PARTITION,\n dfvfs_definitions.TYPE_INDICATOR_GPT):\n if fs_location in ('\\\\', '/'):\n fs_location = getattr(path_spec, 'location', None)\n break\n path_spec = path_spec.parent\n if fs_location == location:\n return child_path_spec\n return None\n",
"path": "turbinia/processors/partitions.py"
}
] | diff --git a/turbinia/processors/partitions.py b/turbinia/processors/partitions.py
index 65008fd40..142145ed6 100644
--- a/turbinia/processors/partitions.py
+++ b/turbinia/processors/partitions.py
@@ -21,6 +21,7 @@
from dfvfs.lib import errors as dfvfs_errors
from turbinia.lib.dfvfs_classes import UnattendedVolumeScannerMediator
+from turbinia import TurbiniaException
log = logging.getLogger('turbinia')
|
ansible__ansible-modules-core-5047 | junos_command errors out with "TypeError: Type 'str' cannot be serialized"
##### ISSUE TYPE
- Bug Report
##### COMPONENT NAME
junos_command core module
##### ANSIBLE VERSION
```
$ ansible --version
ansible 2.1.0.0
config file = /etc/ansible/ansible.cfg
configured module search path = Default w/o overrides
```
##### CONFIGURATION
No changes to configuration
##### OS / ENVIRONMENT
$ uname -a
Linux dev-net-01 4.4.0-31-generic #50-Ubuntu SMP Wed Jul 13 00:07:12 UTC 2016 x86_64 x86_64 x86_64 GNU/Linux
##### SUMMARY
I have an Ansible script where I am simply using the junos_command module to get the list of users from a Juniper switch; the snippet is below. Whenever I run it, I get the RuntimeWarning and `TypeError: Type 'str' cannot be serialized` shown further down. Commands like 'show version' run successfully with the same code, but 'show configuration system login' does not. Please look into this.
**Script:**
```
name: / GET USERS / Get list of all the current users on switch
action: junos_command
args: { commands: 'show configuration system login',
        provider: "{{ netconf }}" }
register: curr_users_on_switch
```
**Error:**
```
TASK [/ GET USERS / Get list of all the current users on switch] ***************
fatal: [rlab-er1]: FAILED! => {"changed": false, "failed": true, "module_stderr": "/home/mbhadoria/.local/lib/python2.7/site-packages/jnpr/junos/device.py:429: RuntimeWarning: CLI command is for debug use only!
\n warnings.warn(\"CLI command is for debug use only!\", RuntimeWarning)\nTraceback (most recent call last):
\n File \"/tmp/ansible_lVOmPp/ansible_module_junos_command.py\", line 261, in <module>
\n main()
\n File \"/tmp/ansible_lVOmPp/ansible_module_junos_command.py\", line 233, in main
\n xmlout.append(xml_to_string(response[index]))
\n File \"/tmp/ansible_lVOmPp/ansible_modlib.zip/ansible/module_utils/junos.py\", line 79, in xml_to_string\n File \"src/lxml/lxml.etree.pyx\", line 3350, in lxml.etree.tostring (src/lxml/lxml.etree.c:84534)\nTypeError: Type 'str' cannot be serialized.
\n", "module_stdout": "", "msg": "MODULE FAILURE", "parsed": false}
```
##### STEPS TO REPRODUCE
Mentioned in the section above.
```
name: / GET USERS / Get list of all the current users on switch
action: junos_command
args: { commands: 'show configuration system login',
        provider: "{{ netconf }}" }
register: curr_users_on_switch
```
##### EXPECTED RESULTS
The task returns the list of users on the Juniper switch; no error is expected.
##### ACTUAL RESULTS
```
TASK [/ GET USERS / Get list of all the current users on switch] ***************
<rlab-er1> EXEC /bin/sh -c '( umask 77 && mkdir -p "` echo $HOME/.ansible/tmp/ansible-tmp-1472681123.92-107492843053729 `" && echo ansible-tmp-1472681123.92-107492843053729="` echo $HOME/.ansible/tmp/ansible-tmp-1472681123.92-107492843053729 `" ) && sleep 0'
<rlab-er1> PUT /tmp/tmpU9G6IE TO /home/mbhadoria/.ansible/tmp/ansible-tmp-1472681123.92-107492843053729/junos_command
<rlab-er1> EXEC /bin/sh -c 'LANG=en_US.UTF-8 LC_ALL=en_US.UTF-8 LC_MESSAGES=en_US.UTF-8 /usr/bin/python /home/mbhadoria/.ansible/tmp/ansible-tmp-1472681123.92-107492843053729/junos_command; rm -rf "/home/mbhadoria/.ansible/tmp/ansible-tmp-1472681123.92-107492843053729/" > /dev/null 2>&1 && sleep 0'
fatal: [rlab-er1]: FAILED! => {"changed": false, "failed": true, "invocation": {"module_name": "junos_command"}, "module_stderr": "/home/mbhadoria/.local/lib/python2.7/site-packages/jnpr/junos/device.py:429: RuntimeWarning: CLI command is for debug use only!\n warnings.warn(\"CLI command is for debug use only!\", RuntimeWarning)\nTraceback (most recent call last):\n File \"/tmp/ansible_mdpif7/ansible_module_junos_command.py\", line 261, in <module>\n main()\n File \"/tmp/ansible_mdpif7/ansible_module_junos_command.py\", line 233, in main\n xmlout.append(xml_to_string(response[index]))\n File \"/tmp/ansible_mdpif7/ansible_modlib.zip/ansible/module_utils/junos.py\", line 79, in xml_to_string\n File \"src/lxml/lxml.etree.pyx\", line 3350, in lxml.etree.tostring (src/lxml/lxml.etree.c:84534)\nTypeError: Type 'str' cannot be serialized.\n", "module_stdout": "", "msg": "MODULE FAILURE", "parsed": false}
```
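The root cause is that Junos returns `show configuration ...` output as plain text rather than an XML reply, so the module's attempt to serialize it with `lxml.etree.tostring` fails. The fix that landed (see the diff below) forces text output for such commands inside `parse()`; here is a small self-contained sketch of that check (the helper name is illustrative, not part of the module):

```python
def force_text_output(item):
    # Mirrors the merged fix: `show configuration [options]` replies are
    # plain text, so forcing output to 'text' keeps the module from handing
    # a str to lxml.etree.tostring ("Type 'str' cannot be serialized").
    if item['command'].startswith('show configuration'):
        item['output'] = 'text'
    return item


print(force_text_output(
    {'command': 'show configuration system login', 'output': 'xml'}))
# {'command': 'show configuration system login', 'output': 'text'}
```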
| [
{
"content": "#!/usr/bin/python\n#\n# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Ansible is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Ansible. If not, see <http://www.gnu.org/licenses/>.\n#\n\nDOCUMENTATION = \"\"\"\n---\nmodule: junos_command\nversion_added: \"2.1\"\nauthor: \"Peter Sprygada (@privateip)\"\nshort_description: Execute arbitrary commands on a remote device running Junos\ndescription:\n - Network devices running the Junos operating system provide a command\n driven interface both over CLI and RPC. This module provides an\n interface to execute commands using these functions and return the\n results to the Ansible playbook. In addition, this\n module can specify a set of conditionals to be evaluated against the\n returned output, only returning control to the playbook once the\n entire set of conditionals has been met.\nextends_documentation_fragment: junos\noptions:\n commands:\n description:\n - The C(commands) to send to the remote device over the Netconf\n transport. The resulting output from the command\n is returned. If the I(wait_for) argument is provided, the\n module is not returned until the condition is satisfied or\n the number of I(retries) has been exceeded.\n required: false\n default: null\n rpcs:\n description:\n - The C(rpcs) argument accepts a list of RPCs to be executed\n over a netconf session and the results from the RPC execution\n is return to the playbook via the modules results dictionary.\n required: false\n default: null\n wait_for:\n description:\n - Specifies what to evaluate from the output of the command\n and what conditionals to apply. This argument will cause\n the task to wait for a particular conditional to be true\n before moving forward. If the conditional is not true\n by the configured retries, the task fails. See examples.\n required: false\n default: null\n aliases: ['waitfor']\n version_added: \"2.2\"\n match:\n description:\n - The I(match) argument is used in conjunction with the\n I(wait_for) argument to specify the match policy. Valid\n values are C(all) or C(any). If the value is set to C(all)\n then all conditionals in the I(wait_for) must be satisfied. If\n the value is set to C(any) then only one of the values must be\n satisfied.\n required: false\n default: all\n choices: ['any', 'all']\n version_added: \"2.2\"\n retries:\n description:\n - Specifies the number of retries a command should by tried\n before it is considered failed. The command is run on the\n target device every retry and evaluated against the I(waitfor)\n conditionals.\n required: false\n default: 10\n interval:\n description:\n - Configures the interval in seconds to wait between retries\n of the command. If the command does not pass the specified\n conditional, the interval indicates how to long to wait before\n trying the command again.\n required: false\n default: 1\n format:\n description:\n - Configures the encoding scheme to use when serializing output\n from the device. 
This handles how to properly understand the\n output and apply the conditionals path to the result set.\n required: false\n default: 'xml'\n choices: ['xml', 'text']\nrequirements:\n - junos-eznc\nnotes:\n - This module requires the netconf system service be enabled on\n the remote device being managed\n\"\"\"\n\nEXAMPLES = \"\"\"\n# Note: examples below use the following provider dict to handle\n# transport and authentication to the node.\nvars:\n netconf:\n host: \"{{ inventory_hostname }}\"\n username: ansible\n password: Ansible\n\n- name: run a set of commands\n junos_command:\n commands: ['show version', 'show ip route']\n provider: \"{{ netconf }}\"\n\n- name: run a command with a conditional applied to the second command\n junos_command:\n commands:\n - show version\n - show interfaces fxp0\n waitfor:\n - \"result[1].interface-information.physical-interface.name eq fxp0\"\n provider: \"{{ netconf }}\"\n\n- name: collect interface information using rpc\n junos_command:\n rpcs:\n - \"get_interface_information interface=em0 media=True\"\n - \"get_interface_information interface=fxp0 media=True\"\n provider: \"{{ netconf }}\"\n\"\"\"\n\nRETURN = \"\"\"\nstdout:\n description: The output from the commands read from the device\n returned: always\n type: list\n sample: ['...', '...']\n\nstdout_lines:\n description: The output read from the device split into lines\n returned: always\n type: list\n sample: [['...', '...'], ['...', '...']]\n\nfailed_conditionals:\n description: the conditionals that failed\n retured: failed\n type: list\n sample: ['...', '...']\n\nxml:\n description: The raw XML reply from the device\n returned: when format is xml\n type: list\n sample: [['...', '...'], ['...', '...']]\n\"\"\"\nimport re\n\nimport ansible.module_utils.junos\n\n\nfrom ansible.module_utils.basic import get_exception\nfrom ansible.module_utils.network import NetworkModule, NetworkError\nfrom ansible.module_utils.netcli import CommandRunner\nfrom ansible.module_utils.netcli import AddCommandError, FailedConditionsError\nfrom ansible.module_utils.junos import xml_to_json\n\nVALID_KEYS = {\n 'cli': frozenset(['command', 'output', 'prompt', 'response']),\n 'rpc': frozenset(['command', 'output'])\n}\n\n\ndef to_lines(stdout):\n for item in stdout:\n if isinstance(item, basestring):\n item = str(item).split('\\n')\n yield item\n\ndef parse(module, command_type):\n if command_type == 'cli':\n items = module.params['commands']\n elif command_type == 'rpc':\n items = module.params['rpcs']\n\n parsed = list()\n for item in (items or list()):\n if isinstance(item, basestring):\n item = dict(command=item, output=None)\n elif 'command' not in item:\n module.fail_json(msg='command keyword argument is required')\n elif item.get('output') not in [None, 'text', 'xml']:\n module.fail_json(msg='invalid output specified for command'\n 'Supported values are `text` or `xml`')\n elif not set(item.keys()).issubset(VALID_KEYS[command_type]):\n module.fail_json(msg='unknown command keyword specified. 
Valid '\n 'values are %s' % ', '.join(VALID_KEYS[command_type]))\n\n if not item['output']:\n item['output'] = module.params['display']\n\n item['command_type'] = command_type\n\n parsed.append(item)\n\n return parsed\n\n\ndef main():\n \"\"\"main entry point for Ansible module\n \"\"\"\n\n spec = dict(\n commands=dict(type='list'),\n rpcs=dict(type='list'),\n\n display=dict(default='xml', choices=['text', 'xml'],\n aliases=['format', 'output']),\n\n wait_for=dict(type='list', aliases=['waitfor']),\n match=dict(default='all', choices=['all', 'any']),\n\n retries=dict(default=10, type='int'),\n interval=dict(default=1, type='int'),\n\n transport=dict(default='netconf', choices=['netconf'])\n )\n\n mutually_exclusive = [('commands', 'rpcs')]\n\n module = NetworkModule(argument_spec=spec,\n mutually_exclusive=mutually_exclusive,\n supports_check_mode=True)\n\n commands = list()\n for key in VALID_KEYS.keys():\n commands.extend(list(parse(module, key)))\n\n conditionals = module.params['wait_for'] or list()\n\n warnings = list()\n\n runner = CommandRunner(module)\n\n for cmd in commands:\n if module.check_mode and not cmd['command'].startswith('show'):\n warnings.append('only show commands are supported when using '\n 'check mode, not executing `%s`' % cmd['command'])\n else:\n if cmd['command'].startswith('co'):\n module.fail_json(msg='junos_command does not support running '\n 'config mode commands. Please use '\n 'junos_config instead')\n try:\n runner.add_command(**cmd)\n except AddCommandError:\n exc = get_exception()\n warnings.append('duplicate command detected: %s' % cmd)\n\n for item in conditionals:\n runner.add_conditional(item)\n\n runner.retries = module.params['retries']\n runner.interval = module.params['interval']\n runner.match = module.params['match']\n\n try:\n runner.run()\n except FailedConditionsError:\n exc = get_exception()\n module.fail_json(msg=str(exc), failed_conditions=exc.failed_conditions)\n except NetworkError:\n exc = get_exception()\n module.fail_json(msg=str(exc))\n\n result = dict(changed=False, stdout=list())\n xmlout = list()\n\n for cmd in commands:\n try:\n output = runner.get_command(cmd['command'], cmd.get('output'))\n xmlout.append(output)\n output = xml_to_json(output)\n except ValueError:\n output = 'command not executed due to check_mode, see warnings'\n result['stdout'].append(output)\n\n result['warnings'] = warnings\n result['xml'] = xmlout\n result['stdout_lines'] = list(to_lines(result['stdout']))\n\n module.exit_json(**result)\n\n\nif __name__ == '__main__':\n main()\n\n",
"path": "network/junos/junos_command.py"
}
] | [
{
"content": "#!/usr/bin/python\n#\n# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Ansible is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Ansible. If not, see <http://www.gnu.org/licenses/>.\n#\n\nDOCUMENTATION = \"\"\"\n---\nmodule: junos_command\nversion_added: \"2.1\"\nauthor: \"Peter Sprygada (@privateip)\"\nshort_description: Execute arbitrary commands on a remote device running Junos\ndescription:\n - Network devices running the Junos operating system provide a command\n driven interface both over CLI and RPC. This module provides an\n interface to execute commands using these functions and return the\n results to the Ansible playbook. In addition, this\n module can specify a set of conditionals to be evaluated against the\n returned output, only returning control to the playbook once the\n entire set of conditionals has been met.\nextends_documentation_fragment: junos\noptions:\n commands:\n description:\n - The C(commands) to send to the remote device over the Netconf\n transport. The resulting output from the command\n is returned. If the I(wait_for) argument is provided, the\n module is not returned until the condition is satisfied or\n the number of I(retries) has been exceeded.\n required: false\n default: null\n rpcs:\n description:\n - The C(rpcs) argument accepts a list of RPCs to be executed\n over a netconf session and the results from the RPC execution\n is return to the playbook via the modules results dictionary.\n required: false\n default: null\n wait_for:\n description:\n - Specifies what to evaluate from the output of the command\n and what conditionals to apply. This argument will cause\n the task to wait for a particular conditional to be true\n before moving forward. If the conditional is not true\n by the configured retries, the task fails. See examples.\n required: false\n default: null\n aliases: ['waitfor']\n version_added: \"2.2\"\n match:\n description:\n - The I(match) argument is used in conjunction with the\n I(wait_for) argument to specify the match policy. Valid\n values are C(all) or C(any). If the value is set to C(all)\n then all conditionals in the I(wait_for) must be satisfied. If\n the value is set to C(any) then only one of the values must be\n satisfied.\n required: false\n default: all\n choices: ['any', 'all']\n version_added: \"2.2\"\n retries:\n description:\n - Specifies the number of retries a command should by tried\n before it is considered failed. The command is run on the\n target device every retry and evaluated against the I(waitfor)\n conditionals.\n required: false\n default: 10\n interval:\n description:\n - Configures the interval in seconds to wait between retries\n of the command. If the command does not pass the specified\n conditional, the interval indicates how to long to wait before\n trying the command again.\n required: false\n default: 1\n format:\n description:\n - Configures the encoding scheme to use when serializing output\n from the device. 
This handles how to properly understand the\n output and apply the conditionals path to the result set.\n required: false\n default: 'xml'\n choices: ['xml', 'text']\nrequirements:\n - junos-eznc\nnotes:\n - This module requires the netconf system service be enabled on\n the remote device being managed\n\"\"\"\n\nEXAMPLES = \"\"\"\n# Note: examples below use the following provider dict to handle\n# transport and authentication to the node.\nvars:\n netconf:\n host: \"{{ inventory_hostname }}\"\n username: ansible\n password: Ansible\n\n- name: run a set of commands\n junos_command:\n commands: ['show version', 'show ip route']\n provider: \"{{ netconf }}\"\n\n- name: run a command with a conditional applied to the second command\n junos_command:\n commands:\n - show version\n - show interfaces fxp0\n waitfor:\n - \"result[1].interface-information.physical-interface.name eq fxp0\"\n provider: \"{{ netconf }}\"\n\n- name: collect interface information using rpc\n junos_command:\n rpcs:\n - \"get_interface_information interface=em0 media=True\"\n - \"get_interface_information interface=fxp0 media=True\"\n provider: \"{{ netconf }}\"\n\"\"\"\n\nRETURN = \"\"\"\nstdout:\n description: The output from the commands read from the device\n returned: always\n type: list\n sample: ['...', '...']\n\nstdout_lines:\n description: The output read from the device split into lines\n returned: always\n type: list\n sample: [['...', '...'], ['...', '...']]\n\nfailed_conditionals:\n description: the conditionals that failed\n retured: failed\n type: list\n sample: ['...', '...']\n\"\"\"\nimport re\n\nimport ansible.module_utils.junos\n\nfrom ansible.module_utils.basic import get_exception\nfrom ansible.module_utils.network import NetworkModule, NetworkError\nfrom ansible.module_utils.netcli import CommandRunner\nfrom ansible.module_utils.netcli import AddCommandError, FailedConditionsError\n\nVALID_KEYS = {\n 'cli': frozenset(['command', 'output', 'prompt', 'response']),\n 'rpc': frozenset(['command', 'output'])\n}\n\n\ndef to_lines(stdout):\n for item in stdout:\n if isinstance(item, basestring):\n item = str(item).split('\\n')\n yield item\n\ndef parse(module, command_type):\n if command_type == 'cli':\n items = module.params['commands']\n elif command_type == 'rpc':\n items = module.params['rpcs']\n\n parsed = list()\n for item in (items or list()):\n if isinstance(item, basestring):\n item = dict(command=item, output=None)\n elif 'command' not in item:\n module.fail_json(msg='command keyword argument is required')\n elif item.get('output') not in [None, 'text', 'xml']:\n module.fail_json(msg='invalid output specified for command'\n 'Supported values are `text` or `xml`')\n elif not set(item.keys()).issubset(VALID_KEYS[command_type]):\n module.fail_json(msg='unknown command keyword specified. 
Valid '\n 'values are %s' % ', '.join(VALID_KEYS[command_type]))\n\n if not item['output']:\n item['output'] = module.params['display']\n\n item['command_type'] = command_type\n\n # show configuration [options] will return as text\n if item['command'].startswith('show configuration'):\n item['output'] = 'text'\n\n parsed.append(item)\n\n return parsed\n\n\ndef main():\n \"\"\"main entry point for Ansible module\n \"\"\"\n\n spec = dict(\n commands=dict(type='list'),\n rpcs=dict(type='list'),\n\n display=dict(default='xml', choices=['text', 'xml'],\n aliases=['format', 'output']),\n\n wait_for=dict(type='list', aliases=['waitfor']),\n match=dict(default='all', choices=['all', 'any']),\n\n retries=dict(default=10, type='int'),\n interval=dict(default=1, type='int'),\n\n transport=dict(default='netconf', choices=['netconf'])\n )\n\n mutually_exclusive = [('commands', 'rpcs')]\n\n module = NetworkModule(argument_spec=spec,\n mutually_exclusive=mutually_exclusive,\n supports_check_mode=True)\n\n commands = list()\n for key in VALID_KEYS.keys():\n commands.extend(list(parse(module, key)))\n\n conditionals = module.params['wait_for'] or list()\n\n warnings = list()\n\n runner = CommandRunner(module)\n\n for cmd in commands:\n if module.check_mode and not cmd['command'].startswith('show'):\n warnings.append('only show commands are supported when using '\n 'check mode, not executing `%s`' % cmd['command'])\n else:\n if cmd['command'].startswith('co'):\n module.fail_json(msg='junos_command does not support running '\n 'config mode commands. Please use '\n 'junos_config instead')\n try:\n runner.add_command(**cmd)\n except AddCommandError:\n exc = get_exception()\n warnings.append('duplicate command detected: %s' % cmd)\n\n for item in conditionals:\n runner.add_conditional(item)\n\n runner.retries = module.params['retries']\n runner.interval = module.params['interval']\n runner.match = module.params['match']\n\n try:\n runner.run()\n except FailedConditionsError:\n exc = get_exception()\n module.fail_json(msg=str(exc), failed_conditions=exc.failed_conditions)\n except NetworkError:\n exc = get_exception()\n module.fail_json(msg=str(exc))\n\n result = dict(changed=False, stdout=list())\n\n for cmd in commands:\n try:\n output = runner.get_command(cmd['command'], cmd.get('output'))\n except ValueError:\n output = 'command not executed due to check_mode, see warnings'\n result['stdout'].append(output)\n\n result['warnings'] = warnings\n result['stdout_lines'] = list(to_lines(result['stdout']))\n\n module.exit_json(**result)\n\n\nif __name__ == '__main__':\n main()\n\n",
"path": "network/junos/junos_command.py"
}
] | diff --git a/network/junos/junos_command.py b/network/junos/junos_command.py
index 3286b0614e7..28223bfae49 100644
--- a/network/junos/junos_command.py
+++ b/network/junos/junos_command.py
@@ -197,6 +197,10 @@ def parse(module, command_type):
item['command_type'] = command_type
+ # show configuration [options] will return as text
+ if item['command'].startswith('show configuration'):
+ item['output'] = 'text'
+
parsed.append(item)
return parsed
|
pyca__cryptography-4077 | utils.int_from_bytes gives incorrect answers when passed "builtins.bytes" in python 2.7
```
$ mkvirtualenv repro
$ python --version
Python 2.7.12
$ pip install cryptography future
$ python
>>> from cryptography import utils
>>> from builtins import bytes
>>> x = bytes.fromhex('deadbeef')
>>> y = utils.int_from_bytes(x, 'big')
>>> hex(y)
'0x6227deadbeef27'
```
The reason this happens is that `int_from_bytes` (in its py27 fallback) calls `bytes()` on the passed-in value, and on Python 2.7 `bytes` is an alias for `str`. Passing a `builtins.bytes` value to `str` somewhat insanely wraps the string with `b'` and `'`. Those extra characters then get parsed by the rest of `int_from_bytes` as if they were part of the original byte string.
I think this is particularly unfortunate since all the "cryptography" functions say they accept and return `bytes` in their docstrings. Ideally it'd be compatible with all three definitions of `bytes`: the py27 alias to `str`, the one from "future", and the py3 one.
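The fix that landed (see the diff below) drops the `bytes()` round-trip entirely and hex-decodes via `binascii.hexlify`, which treats `str`, `bytearray`, and `builtins.bytes` uniformly on Python 2.7:

```python
import binascii


def int_from_bytes(data, byteorder, signed=False):
    assert byteorder == 'big'
    assert not signed
    # hexlify never adds the b'...' wrapper that str() does, so
    # builtins.bytes values decode to the correct integer.
    return int(binascii.hexlify(data), 16)
```

As a bonus, `hexlify` raises `TypeError` for non-buffer inputs such as lists, which the new test in the diff checks.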
| [
{
"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\nfrom __future__ import absolute_import, division, print_function\n\nimport abc\nimport binascii\nimport inspect\nimport sys\nimport warnings\n\n\n# We use a UserWarning subclass, instead of DeprecationWarning, because CPython\n# decided deprecation warnings should be invisble by default.\nclass CryptographyDeprecationWarning(UserWarning):\n pass\n\n\n# Several APIs were deprecated with no specific end-of-life date because of the\n# ubiquity of their use. They should not be removed until we agree on when that\n# cycle ends.\nPersistentlyDeprecated = CryptographyDeprecationWarning\nDeprecatedIn21 = CryptographyDeprecationWarning\n\n\ndef _check_bytes(name, value):\n if not isinstance(value, bytes):\n raise TypeError(\"{0} must be bytes\".format(name))\n\n\ndef read_only_property(name):\n return property(lambda self: getattr(self, name))\n\n\ndef register_interface(iface):\n def register_decorator(klass):\n verify_interface(iface, klass)\n iface.register(klass)\n return klass\n return register_decorator\n\n\ndef register_interface_if(predicate, iface):\n def register_decorator(klass):\n if predicate:\n verify_interface(iface, klass)\n iface.register(klass)\n return klass\n return register_decorator\n\n\nif hasattr(int, \"from_bytes\"):\n int_from_bytes = int.from_bytes\nelse:\n def int_from_bytes(data, byteorder, signed=False):\n assert byteorder == 'big'\n assert not signed\n\n # call bytes() on data to allow the use of bytearrays\n return int(bytes(data).encode('hex'), 16)\n\n\nif hasattr(int, \"to_bytes\"):\n def int_to_bytes(integer, length=None):\n return integer.to_bytes(\n length or (integer.bit_length() + 7) // 8 or 1, 'big'\n )\nelse:\n def int_to_bytes(integer, length=None):\n hex_string = '%x' % integer\n if length is None:\n n = len(hex_string)\n else:\n n = length * 2\n return binascii.unhexlify(hex_string.zfill(n + (n & 1)))\n\n\nclass InterfaceNotImplemented(Exception):\n pass\n\n\nif hasattr(inspect, \"signature\"):\n signature = inspect.signature\nelse:\n signature = inspect.getargspec\n\n\ndef verify_interface(iface, klass):\n for method in iface.__abstractmethods__:\n if not hasattr(klass, method):\n raise InterfaceNotImplemented(\n \"{0} is missing a {1!r} method\".format(klass, method)\n )\n if isinstance(getattr(iface, method), abc.abstractproperty):\n # Can't properly verify these yet.\n continue\n sig = signature(getattr(iface, method))\n actual = signature(getattr(klass, method))\n if sig != actual:\n raise InterfaceNotImplemented(\n \"{0}.{1}'s signature differs from the expected. Expected: \"\n \"{2!r}. 
Received: {3!r}\".format(\n klass, method, sig, actual\n )\n )\n\n\n# No longer needed as of 2.2, but retained because we have external consumers\n# who use it.\ndef bit_length(x):\n return x.bit_length()\n\n\nclass _DeprecatedValue(object):\n def __init__(self, value, message, warning_class):\n self.value = value\n self.message = message\n self.warning_class = warning_class\n\n\nclass _ModuleWithDeprecations(object):\n def __init__(self, module):\n self.__dict__[\"_module\"] = module\n\n def __getattr__(self, attr):\n obj = getattr(self._module, attr)\n if isinstance(obj, _DeprecatedValue):\n warnings.warn(obj.message, obj.warning_class, stacklevel=2)\n obj = obj.value\n return obj\n\n def __setattr__(self, attr, value):\n setattr(self._module, attr, value)\n\n def __delattr__(self, attr):\n obj = getattr(self._module, attr)\n if isinstance(obj, _DeprecatedValue):\n warnings.warn(obj.message, obj.warning_class, stacklevel=2)\n\n delattr(self._module, attr)\n\n def __dir__(self):\n return [\"_module\"] + dir(self._module)\n\n\ndef deprecated(value, module_name, message, warning_class):\n module = sys.modules[module_name]\n if not isinstance(module, _ModuleWithDeprecations):\n sys.modules[module_name] = _ModuleWithDeprecations(module)\n return _DeprecatedValue(value, message, warning_class)\n\n\ndef cached_property(func):\n cached_name = \"_cached_{0}\".format(func)\n sentinel = object()\n\n def inner(instance):\n cache = getattr(instance, cached_name, sentinel)\n if cache is not sentinel:\n return cache\n result = func(instance)\n setattr(instance, cached_name, result)\n return result\n return property(inner)\n",
"path": "src/cryptography/utils.py"
}
] | [
{
"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\nfrom __future__ import absolute_import, division, print_function\n\nimport abc\nimport binascii\nimport inspect\nimport sys\nimport warnings\n\n\n# We use a UserWarning subclass, instead of DeprecationWarning, because CPython\n# decided deprecation warnings should be invisble by default.\nclass CryptographyDeprecationWarning(UserWarning):\n pass\n\n\n# Several APIs were deprecated with no specific end-of-life date because of the\n# ubiquity of their use. They should not be removed until we agree on when that\n# cycle ends.\nPersistentlyDeprecated = CryptographyDeprecationWarning\nDeprecatedIn21 = CryptographyDeprecationWarning\n\n\ndef _check_bytes(name, value):\n if not isinstance(value, bytes):\n raise TypeError(\"{0} must be bytes\".format(name))\n\n\ndef read_only_property(name):\n return property(lambda self: getattr(self, name))\n\n\ndef register_interface(iface):\n def register_decorator(klass):\n verify_interface(iface, klass)\n iface.register(klass)\n return klass\n return register_decorator\n\n\ndef register_interface_if(predicate, iface):\n def register_decorator(klass):\n if predicate:\n verify_interface(iface, klass)\n iface.register(klass)\n return klass\n return register_decorator\n\n\nif hasattr(int, \"from_bytes\"):\n int_from_bytes = int.from_bytes\nelse:\n def int_from_bytes(data, byteorder, signed=False):\n assert byteorder == 'big'\n assert not signed\n\n return int(binascii.hexlify(data), 16)\n\n\nif hasattr(int, \"to_bytes\"):\n def int_to_bytes(integer, length=None):\n return integer.to_bytes(\n length or (integer.bit_length() + 7) // 8 or 1, 'big'\n )\nelse:\n def int_to_bytes(integer, length=None):\n hex_string = '%x' % integer\n if length is None:\n n = len(hex_string)\n else:\n n = length * 2\n return binascii.unhexlify(hex_string.zfill(n + (n & 1)))\n\n\nclass InterfaceNotImplemented(Exception):\n pass\n\n\nif hasattr(inspect, \"signature\"):\n signature = inspect.signature\nelse:\n signature = inspect.getargspec\n\n\ndef verify_interface(iface, klass):\n for method in iface.__abstractmethods__:\n if not hasattr(klass, method):\n raise InterfaceNotImplemented(\n \"{0} is missing a {1!r} method\".format(klass, method)\n )\n if isinstance(getattr(iface, method), abc.abstractproperty):\n # Can't properly verify these yet.\n continue\n sig = signature(getattr(iface, method))\n actual = signature(getattr(klass, method))\n if sig != actual:\n raise InterfaceNotImplemented(\n \"{0}.{1}'s signature differs from the expected. Expected: \"\n \"{2!r}. 
Received: {3!r}\".format(\n klass, method, sig, actual\n )\n )\n\n\n# No longer needed as of 2.2, but retained because we have external consumers\n# who use it.\ndef bit_length(x):\n return x.bit_length()\n\n\nclass _DeprecatedValue(object):\n def __init__(self, value, message, warning_class):\n self.value = value\n self.message = message\n self.warning_class = warning_class\n\n\nclass _ModuleWithDeprecations(object):\n def __init__(self, module):\n self.__dict__[\"_module\"] = module\n\n def __getattr__(self, attr):\n obj = getattr(self._module, attr)\n if isinstance(obj, _DeprecatedValue):\n warnings.warn(obj.message, obj.warning_class, stacklevel=2)\n obj = obj.value\n return obj\n\n def __setattr__(self, attr, value):\n setattr(self._module, attr, value)\n\n def __delattr__(self, attr):\n obj = getattr(self._module, attr)\n if isinstance(obj, _DeprecatedValue):\n warnings.warn(obj.message, obj.warning_class, stacklevel=2)\n\n delattr(self._module, attr)\n\n def __dir__(self):\n return [\"_module\"] + dir(self._module)\n\n\ndef deprecated(value, module_name, message, warning_class):\n module = sys.modules[module_name]\n if not isinstance(module, _ModuleWithDeprecations):\n sys.modules[module_name] = _ModuleWithDeprecations(module)\n return _DeprecatedValue(value, message, warning_class)\n\n\ndef cached_property(func):\n cached_name = \"_cached_{0}\".format(func)\n sentinel = object()\n\n def inner(instance):\n cache = getattr(instance, cached_name, sentinel)\n if cache is not sentinel:\n return cache\n result = func(instance)\n setattr(instance, cached_name, result)\n return result\n return property(inner)\n",
"path": "src/cryptography/utils.py"
}
] | diff --git a/src/cryptography/utils.py b/src/cryptography/utils.py
index d69ed89fcef2..14909c66bb6e 100644
--- a/src/cryptography/utils.py
+++ b/src/cryptography/utils.py
@@ -57,8 +57,7 @@ def int_from_bytes(data, byteorder, signed=False):
assert byteorder == 'big'
assert not signed
- # call bytes() on data to allow the use of bytearrays
- return int(bytes(data).encode('hex'), 16)
+ return int(binascii.hexlify(data), 16)
if hasattr(int, "to_bytes"):
diff --git a/tests/test_cryptography_utils.py b/tests/test_cryptography_utils.py
index 320f7aa8ab85..ddea7602c124 100644
--- a/tests/test_cryptography_utils.py
+++ b/tests/test_cryptography_utils.py
@@ -11,6 +11,8 @@
def test_int_from_bytes_bytearray():
assert utils.int_from_bytes(bytearray(b"\x02\x10"), "big") == 528
+ with pytest.raises(TypeError):
+ utils.int_from_bytes(["list", "is", "not", "bytes"], "big")
class TestCachedProperty(object):
|
feast-dev__feast-3083 | Missing key error in snowflake_python_type_to_feast_value_type in type_map for numpy datetime64 with timezone
## Expected Behavior
Feast should be able to handle different source column data types when updating feature views with inferred features. Specifically, all possible `datetime64` Python data types with specific timezones should be handled.
## Current Behavior
The Snowflake Python type `datetime64[ns, america/los_angeles]` does not have a corresponding Feast `ValueType`. There is a `ValueType` for `datetime64[ns]`, but not for a numpy datetime64 with a specific timezone.
```
File "/opt/homebrew/anaconda3/envs/feast-python37/lib/python3.7/site-packages/feast/type_map.py", line 536, in snowflake_python_type_to_feast_value_type
return type_map[snowflake_python_type_as_str.lower()]
KeyError: 'datetime64[ns, america/los_angeles]'
```
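For context, any tz-aware pandas column produces exactly such a dtype string, so no Snowflake connection is needed to see the offending key (a small repro sketch):

```python
import pandas as pd

# A timezone-aware datetime column yields a dtype string that includes the
# timezone, which is what the type_map lookup above fails on.
s = pd.Series(pd.to_datetime(["2022-01-01"]).tz_localize("America/Los_Angeles"))
print(str(s.dtype).lower())  # datetime64[ns, america/los_angeles]
```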
## Steps to reproduce
### Specifications
- Version: 0.21.3
- Platform: Mac OSX Monterey 12.4
- Subsystem:
## Possible Solution
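One possible approach is to match tz-aware `datetime64` dtype strings with a regex and fall back to the same value type as plain `datetime64[ns]`. This is only a sketch: `ValueType.UNIX_TIMESTAMP` is Feast's real enum member for `datetime64[ns]`, but the helper name, the regex, and its placement are assumptions, not the merged fix.

```python
import re

from feast.value_type import ValueType


def tz_aware_datetime_to_value_type(snowflake_python_type_as_str):
    # Hypothetical guard for snowflake_python_type_to_feast_value_type:
    # "datetime64[ns, america/los_angeles]" and friends collapse to the
    # same ValueType as plain "datetime64[ns]".
    if re.match(r"^datetime64\[ns, .+\]$", snowflake_python_type_as_str.lower()):
        return ValueType.UNIX_TIMESTAMP
    return None
```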
| [
{
"content": "import configparser\nimport os\nimport random\nimport string\nfrom logging import getLogger\nfrom pathlib import Path\nfrom tempfile import TemporaryDirectory\nfrom typing import Any, Dict, Iterator, List, Optional, Tuple, cast\n\nimport pandas as pd\nimport pyarrow\nfrom cryptography.hazmat.backends import default_backend\nfrom cryptography.hazmat.primitives import serialization\nfrom tenacity import (\n retry,\n retry_if_exception_type,\n stop_after_attempt,\n wait_exponential,\n)\n\nfrom feast.errors import SnowflakeIncompleteConfig, SnowflakeQueryUnknownError\n\ntry:\n import snowflake.connector\n from snowflake.connector import ProgrammingError, SnowflakeConnection\n from snowflake.connector.cursor import SnowflakeCursor\nexcept ImportError as e:\n from feast.errors import FeastExtrasDependencyImportError\n\n raise FeastExtrasDependencyImportError(\"snowflake\", str(e))\n\n\ngetLogger(\"snowflake.connector.cursor\").disabled = True\ngetLogger(\"snowflake.connector.connection\").disabled = True\ngetLogger(\"snowflake.connector.network\").disabled = True\nlogger = getLogger(__name__)\n\n\ndef execute_snowflake_statement(conn: SnowflakeConnection, query) -> SnowflakeCursor:\n cursor = conn.cursor().execute(query)\n if cursor is None:\n raise SnowflakeQueryUnknownError(query)\n return cursor\n\n\ndef get_snowflake_conn(config, autocommit=True) -> SnowflakeConnection:\n assert config.type in [\"snowflake.offline\", \"snowflake.online\"]\n\n if config.type == \"snowflake.offline\":\n config_header = \"connections.feast_offline_store\"\n elif config.type == \"snowflake.online\":\n config_header = \"connections.feast_online_store\"\n\n config_dict = dict(config)\n\n # read config file\n config_reader = configparser.ConfigParser()\n config_reader.read([config_dict[\"config_path\"]])\n kwargs: Dict[str, Any] = {}\n if config_reader.has_section(config_header):\n kwargs = dict(config_reader[config_header])\n\n if \"schema\" in kwargs:\n kwargs[\"schema_\"] = kwargs.pop(\"schema\")\n\n kwargs.update((k, v) for k, v in config_dict.items() if v is not None)\n\n for k, v in kwargs.items():\n if k in [\"role\", \"warehouse\", \"database\", \"schema_\"]:\n kwargs[k] = f'\"{v}\"'\n\n if \"schema_\" in kwargs:\n kwargs[\"schema\"] = kwargs.pop(\"schema_\")\n else:\n kwargs[\"schema\"] = '\"PUBLIC\"'\n\n # https://docs.snowflake.com/en/user-guide/python-connector-example.html#using-key-pair-authentication-key-pair-rotation\n # https://docs.snowflake.com/en/user-guide/key-pair-auth.html#configuring-key-pair-authentication\n if \"private_key\" in kwargs:\n kwargs[\"private_key\"] = parse_private_key_path(\n kwargs[\"private_key\"], kwargs[\"private_key_passphrase\"]\n )\n\n try:\n conn = snowflake.connector.connect(\n application=\"feast\",\n autocommit=autocommit,\n **kwargs,\n )\n\n return conn\n except KeyError as e:\n raise SnowflakeIncompleteConfig(e)\n\n\n# TO DO -- sfc-gh-madkins\n# Remove dependency on write_pandas function by falling back to native snowflake python connector\n# Current issue is datetime[ns] types are read incorrectly in Snowflake, need to coerce to datetime[ns, UTC]\ndef write_pandas(\n conn: SnowflakeConnection,\n df: pd.DataFrame,\n table_name: str,\n database: Optional[str] = None,\n schema: Optional[str] = None,\n chunk_size: Optional[int] = None,\n compression: str = \"gzip\",\n on_error: str = \"abort_statement\",\n parallel: int = 4,\n quote_identifiers: bool = True,\n auto_create_table: bool = False,\n create_temp_table: bool = False,\n):\n \"\"\"Allows users to 
most efficiently write back a pandas DataFrame to Snowflake.\n\n It works by dumping the DataFrame into Parquet files, uploading them and finally copying their data into the table.\n\n Returns whether all files were ingested correctly, number of chunks uploaded, and number of rows ingested\n with all of the COPY INTO command's output for debugging purposes.\n\n Example usage:\n import pandas\n from snowflake.connector.pandas_tools import write_pandas\n\n df = pandas.DataFrame([('Mark', 10), ('Luke', 20)], columns=['name', 'balance'])\n success, nchunks, nrows, _ = write_pandas(cnx, df, 'customers')\n\n Args:\n conn: Connection to be used to communicate with Snowflake.\n df: Dataframe we'd like to write back.\n table_name: Table name where we want to insert into.\n database: Database table is in, if not provided the connection one will be used.\n schema: Schema table is in, if not provided the connection one will be used.\n chunk_size: Number of elements to be inserted once, if not provided all elements will be dumped once\n (Default value = None).\n compression: The compression used on the Parquet files, can only be gzip, or snappy. Gzip gives supposedly a\n better compression, while snappy is faster. Use whichever is more appropriate (Default value = 'gzip').\n on_error: Action to take when COPY INTO statements fail, default follows documentation at:\n https://docs.snowflake.com/en/sql-reference/sql/copy-into-table.html#copy-options-copyoptions\n (Default value = 'abort_statement').\n parallel: Number of threads to be used when uploading chunks, default follows documentation at:\n https://docs.snowflake.com/en/sql-reference/sql/put.html#optional-parameters (Default value = 4).\n quote_identifiers: By default, identifiers, specifically database, schema, table and column names\n (from df.columns) will be quoted. If set to False, identifiers are passed on to Snowflake without quoting.\n I.e. identifiers will be coerced to uppercase by Snowflake. (Default value = True)\n auto_create_table: When true, will automatically create a table with corresponding columns for each column in\n the passed in DataFrame. 
The table will not be created if it already exists\n create_temp_table: Will make the auto-created table as a temporary table\n \"\"\"\n\n cursor: SnowflakeCursor = conn.cursor()\n stage_name = create_temporary_sfc_stage(cursor)\n\n upload_df(df, cursor, stage_name, chunk_size, parallel, compression)\n copy_uploaded_data_to_table(\n cursor,\n stage_name,\n list(df.columns),\n table_name,\n database,\n schema,\n compression,\n on_error,\n quote_identifiers,\n auto_create_table,\n create_temp_table,\n )\n\n\ndef write_parquet(\n conn: SnowflakeConnection,\n path: Path,\n dataset_schema: pyarrow.Schema,\n table_name: str,\n database: Optional[str] = None,\n schema: Optional[str] = None,\n compression: str = \"gzip\",\n on_error: str = \"abort_statement\",\n parallel: int = 4,\n quote_identifiers: bool = True,\n auto_create_table: bool = False,\n create_temp_table: bool = False,\n):\n cursor: SnowflakeCursor = conn.cursor()\n stage_name = create_temporary_sfc_stage(cursor)\n\n columns = [field.name for field in dataset_schema]\n upload_local_pq(path, cursor, stage_name, parallel)\n copy_uploaded_data_to_table(\n cursor,\n stage_name,\n columns,\n table_name,\n database,\n schema,\n compression,\n on_error,\n quote_identifiers,\n auto_create_table,\n create_temp_table,\n )\n\n\ndef copy_uploaded_data_to_table(\n cursor: SnowflakeCursor,\n stage_name: str,\n columns: List[str],\n table_name: str,\n database: Optional[str] = None,\n schema: Optional[str] = None,\n compression: str = \"gzip\",\n on_error: str = \"abort_statement\",\n quote_identifiers: bool = True,\n auto_create_table: bool = False,\n create_temp_table: bool = False,\n):\n if database is not None and schema is None:\n raise ProgrammingError(\n \"Schema has to be provided to write_pandas when a database is provided\"\n )\n # This dictionary maps the compression algorithm to Snowflake put copy into command type\n # https://docs.snowflake.com/en/sql-reference/sql/copy-into-table.html#type-parquet\n compression_map = {\"gzip\": \"auto\", \"snappy\": \"snappy\"}\n if compression not in compression_map.keys():\n raise ProgrammingError(\n \"Invalid compression '{}', only acceptable values are: {}\".format(\n compression, compression_map.keys()\n )\n )\n if quote_identifiers:\n location = (\n (('\"' + database + '\".') if database else \"\")\n + (('\"' + schema + '\".') if schema else \"\")\n + ('\"' + table_name + '\"')\n )\n else:\n location = (\n (database + \".\" if database else \"\")\n + (schema + \".\" if schema else \"\")\n + (table_name)\n )\n\n if quote_identifiers:\n quoted_columns = '\"' + '\",\"'.join(columns) + '\"'\n else:\n quoted_columns = \",\".join(columns)\n\n if auto_create_table:\n file_format_name = create_file_format(compression, compression_map, cursor)\n infer_schema_sql = f\"SELECT COLUMN_NAME, TYPE FROM table(infer_schema(location=>'@\\\"{stage_name}\\\"', file_format=>'{file_format_name}'))\"\n logger.debug(f\"inferring schema with '{infer_schema_sql}'\")\n result_cursor = cursor.execute(infer_schema_sql, _is_internal=True)\n if result_cursor is None:\n raise SnowflakeQueryUnknownError(infer_schema_sql)\n result = cast(List[Tuple[str, str]], result_cursor.fetchall())\n column_type_mapping: Dict[str, str] = dict(result)\n # Infer schema can return the columns out of order depending on the chunking we do when uploading\n # so we have to iterate through the dataframe columns to make sure we create the table with its\n # columns in order\n quote = '\"' if quote_identifiers else \"\"\n create_table_columns = \", 
\".join(\n [f\"{quote}{c}{quote} {column_type_mapping[c]}\" for c in columns]\n )\n create_table_sql = (\n f\"CREATE {'TEMP ' if create_temp_table else ''}TABLE IF NOT EXISTS {location} \"\n f\"({create_table_columns})\"\n f\" /* Python:snowflake.connector.pandas_tools.write_pandas() */ \"\n )\n logger.debug(f\"auto creating table with '{create_table_sql}'\")\n cursor.execute(create_table_sql, _is_internal=True)\n drop_file_format_sql = f\"DROP FILE FORMAT IF EXISTS {file_format_name}\"\n logger.debug(f\"dropping file format with '{drop_file_format_sql}'\")\n cursor.execute(drop_file_format_sql, _is_internal=True)\n\n # in Snowflake, all parquet data is stored in a single column, $1, so we must select columns explicitly\n # see (https://docs.snowflake.com/en/user-guide/script-data-load-transform-parquet.html)\n if quote_identifiers:\n parquet_columns = \"$1:\" + \",$1:\".join(f'\"{c}\"' for c in columns)\n else:\n parquet_columns = \"$1:\" + \",$1:\".join(columns)\n copy_into_sql = (\n \"COPY INTO {location} /* Python:snowflake.connector.pandas_tools.write_pandas() */ \"\n \"({columns}) \"\n 'FROM (SELECT {parquet_columns} FROM @\"{stage_name}\") '\n \"FILE_FORMAT=(TYPE=PARQUET COMPRESSION={compression}) \"\n \"PURGE=TRUE ON_ERROR={on_error}\"\n ).format(\n location=location,\n columns=quoted_columns,\n parquet_columns=parquet_columns,\n stage_name=stage_name,\n compression=compression_map[compression],\n on_error=on_error,\n )\n logger.debug(\"copying into with '{}'\".format(copy_into_sql))\n # Snowflake returns the original cursor if the query execution succeeded.\n result_cursor = cursor.execute(copy_into_sql, _is_internal=True)\n if result_cursor is None:\n raise SnowflakeQueryUnknownError(copy_into_sql)\n result_cursor.close()\n\n\ndef upload_df(\n df: pd.DataFrame,\n cursor: SnowflakeCursor,\n stage_name: str,\n chunk_size: Optional[int] = None,\n parallel: int = 4,\n compression: str = \"gzip\",\n):\n \"\"\"\n Args:\n df: Dataframe we'd like to write back.\n cursor: cursor to be used to communicate with Snowflake.\n stage_name: stage name in Snowflake connection.\n chunk_size: Number of elements to be inserted once, if not provided all elements will be dumped once\n (Default value = None).\n parallel: Number of threads to be used when uploading chunks, default follows documentation at:\n https://docs.snowflake.com/en/sql-reference/sql/put.html#optional-parameters (Default value = 4).\n compression: The compression used on the Parquet files, can only be gzip, or snappy. Gzip gives supposedly a\n better compression, while snappy is faster. 
Use whichever is more appropriate (Default value = 'gzip').\n\n \"\"\"\n if chunk_size is None:\n chunk_size = len(df)\n\n with TemporaryDirectory() as tmp_folder:\n for i, chunk in chunk_helper(df, chunk_size):\n chunk_path = os.path.join(tmp_folder, \"file{}.txt\".format(i))\n # Dump chunk into parquet file\n chunk.to_parquet(\n chunk_path,\n compression=compression,\n use_deprecated_int96_timestamps=True,\n )\n # Upload parquet file\n upload_sql = (\n \"PUT /* Python:feast.infra.utils.snowflake_utils.upload_df() */ \"\n \"'file://{path}' @\\\"{stage_name}\\\" PARALLEL={parallel}\"\n ).format(\n path=chunk_path.replace(\"\\\\\", \"\\\\\\\\\").replace(\"'\", \"\\\\'\"),\n stage_name=stage_name,\n parallel=parallel,\n )\n logger.debug(f\"uploading files with '{upload_sql}'\")\n cursor.execute(upload_sql, _is_internal=True)\n # Remove chunk file\n os.remove(chunk_path)\n\n\ndef upload_local_pq(\n path: Path,\n cursor: SnowflakeCursor,\n stage_name: str,\n parallel: int = 4,\n):\n \"\"\"\n Args:\n path: Path to parquet dataset on disk\n cursor: cursor to be used to communicate with Snowflake.\n stage_name: stage name in Snowflake connection.\n parallel: Number of threads to be used when uploading chunks, default follows documentation at:\n https://docs.snowflake.com/en/sql-reference/sql/put.html#optional-parameters (Default value = 4).\n \"\"\"\n for file in path.iterdir():\n upload_sql = (\n \"PUT /* Python:feast.infra.utils.snowflake_utils.upload_local_pq() */ \"\n \"'file://{path}' @\\\"{stage_name}\\\" PARALLEL={parallel}\"\n ).format(\n path=str(file).replace(\"\\\\\", \"\\\\\\\\\").replace(\"'\", \"\\\\'\"),\n stage_name=stage_name,\n parallel=parallel,\n )\n logger.debug(f\"uploading files with '{upload_sql}'\")\n cursor.execute(upload_sql, _is_internal=True)\n\n\n@retry(\n wait=wait_exponential(multiplier=1, max=4),\n retry=retry_if_exception_type(ProgrammingError),\n stop=stop_after_attempt(5),\n reraise=True,\n)\ndef create_file_format(\n compression: str, compression_map: Dict[str, str], cursor: SnowflakeCursor\n) -> str:\n file_format_name = (\n '\"' + \"\".join(random.choice(string.ascii_lowercase) for _ in range(5)) + '\"'\n )\n file_format_sql = (\n f\"CREATE FILE FORMAT {file_format_name} \"\n f\"/* Python:snowflake.connector.pandas_tools.write_pandas() */ \"\n f\"TYPE=PARQUET COMPRESSION={compression_map[compression]}\"\n )\n logger.debug(f\"creating file format with '{file_format_sql}'\")\n cursor.execute(file_format_sql, _is_internal=True)\n return file_format_name\n\n\n@retry(\n wait=wait_exponential(multiplier=1, max=4),\n retry=retry_if_exception_type(ProgrammingError),\n stop=stop_after_attempt(5),\n reraise=True,\n)\ndef create_temporary_sfc_stage(cursor: SnowflakeCursor) -> str:\n stage_name = \"\".join(random.choice(string.ascii_lowercase) for _ in range(5))\n create_stage_sql = (\n \"create temporary stage /* Python:snowflake.connector.pandas_tools.write_pandas() */ \"\n '\"{stage_name}\"'\n ).format(stage_name=stage_name)\n logger.debug(f\"creating stage with '{create_stage_sql}'\")\n result_cursor = cursor.execute(create_stage_sql, _is_internal=True)\n if result_cursor is None:\n raise SnowflakeQueryUnknownError(create_stage_sql)\n result_cursor.fetchall()\n return stage_name\n\n\ndef chunk_helper(lst: pd.DataFrame, n: int) -> Iterator[Tuple[int, pd.DataFrame]]:\n \"\"\"Helper generator to chunk a sequence efficiently with current index like if enumerate was called on sequence.\"\"\"\n for i in range(0, len(lst), n):\n yield int(i / n), lst[i : i + n]\n\n\ndef 
parse_private_key_path(key_path: str, private_key_passphrase: str) -> bytes:\n\n with open(key_path, \"rb\") as key:\n p_key = serialization.load_pem_private_key(\n key.read(),\n password=private_key_passphrase.encode(),\n backend=default_backend(),\n )\n\n pkb = p_key.private_bytes(\n encoding=serialization.Encoding.DER,\n format=serialization.PrivateFormat.PKCS8,\n encryption_algorithm=serialization.NoEncryption(),\n )\n\n return pkb\n\n\ndef write_pandas_binary(\n conn: SnowflakeConnection,\n df: pd.DataFrame,\n table_name: str,\n database: Optional[str] = None,\n schema: Optional[str] = None,\n chunk_size: Optional[int] = None,\n compression: str = \"gzip\",\n on_error: str = \"abort_statement\",\n parallel: int = 4,\n quote_identifiers: bool = True,\n auto_create_table: bool = False,\n create_temp_table: bool = False,\n):\n \"\"\"Allows users to most efficiently write back a pandas DataFrame to Snowflake.\n\n It works by dumping the DataFrame into Parquet files, uploading them and finally copying their data into the table.\n\n Returns whether all files were ingested correctly, number of chunks uploaded, and number of rows ingested\n with all of the COPY INTO command's output for debugging purposes.\n\n Example usage:\n import pandas\n from snowflake.connector.pandas_tools import write_pandas\n\n df = pandas.DataFrame([('Mark', 10), ('Luke', 20)], columns=['name', 'balance'])\n success, nchunks, nrows, _ = write_pandas(cnx, df, 'customers')\n\n Args:\n conn: Connection to be used to communicate with Snowflake.\n df: Dataframe we'd like to write back.\n table_name: Table name where we want to insert into.\n database: Database table is in, if not provided the connection one will be used.\n schema: Schema table is in, if not provided the connection one will be used.\n chunk_size: Number of elements to be inserted once, if not provided all elements will be dumped once\n (Default value = None).\n compression: The compression used on the Parquet files, can only be gzip, or snappy. Gzip gives supposedly a\n better compression, while snappy is faster. Use whichever is more appropriate (Default value = 'gzip').\n on_error: Action to take when COPY INTO statements fail, default follows documentation at:\n https://docs.snowflake.com/en/sql-reference/sql/copy-into-table.html#copy-options-copyoptions\n (Default value = 'abort_statement').\n parallel: Number of threads to be used when uploading chunks, default follows documentation at:\n https://docs.snowflake.com/en/sql-reference/sql/put.html#optional-parameters (Default value = 4).\n quote_identifiers: By default, identifiers, specifically database, schema, table and column names\n (from df.columns) will be quoted. If set to False, identifiers are passed on to Snowflake without quoting.\n I.e. identifiers will be coerced to uppercase by Snowflake. (Default value = True)\n auto_create_table: When true, will automatically create a table with corresponding columns for each column in\n the passed in DataFrame. 
The table will not be created if it already exists\n create_temp_table: Will make the auto-created table as a temporary table\n \"\"\"\n if database is not None and schema is None:\n raise ProgrammingError(\n \"Schema has to be provided to write_pandas when a database is provided\"\n )\n # This dictionary maps the compression algorithm to Snowflake put copy into command type\n # https://docs.snowflake.com/en/sql-reference/sql/copy-into-table.html#type-parquet\n compression_map = {\"gzip\": \"auto\", \"snappy\": \"snappy\"}\n if compression not in compression_map.keys():\n raise ProgrammingError(\n \"Invalid compression '{}', only acceptable values are: {}\".format(\n compression, compression_map.keys()\n )\n )\n if quote_identifiers:\n location = (\n (('\"' + database + '\".') if database else \"\")\n + (('\"' + schema + '\".') if schema else \"\")\n + ('\"' + table_name + '\"')\n )\n else:\n location = (\n (database + \".\" if database else \"\")\n + (schema + \".\" if schema else \"\")\n + (table_name)\n )\n if chunk_size is None:\n chunk_size = len(df)\n cursor: SnowflakeCursor = conn.cursor()\n stage_name = create_temporary_sfc_stage(cursor)\n\n with TemporaryDirectory() as tmp_folder:\n for i, chunk in chunk_helper(df, chunk_size):\n chunk_path = os.path.join(tmp_folder, \"file{}.txt\".format(i))\n # Dump chunk into parquet file\n chunk.to_parquet(\n chunk_path,\n compression=compression,\n use_deprecated_int96_timestamps=True,\n )\n # Upload parquet file\n upload_sql = (\n \"PUT /* Python:snowflake.connector.pandas_tools.write_pandas() */ \"\n \"'file://{path}' @\\\"{stage_name}\\\" PARALLEL={parallel}\"\n ).format(\n path=chunk_path.replace(\"\\\\\", \"\\\\\\\\\").replace(\"'\", \"\\\\'\"),\n stage_name=stage_name,\n parallel=parallel,\n )\n logger.debug(f\"uploading files with '{upload_sql}'\")\n cursor.execute(upload_sql, _is_internal=True)\n # Remove chunk file\n os.remove(chunk_path)\n if quote_identifiers:\n columns = '\"' + '\",\"'.join(list(df.columns)) + '\"'\n else:\n columns = \",\".join(list(df.columns))\n\n if auto_create_table:\n file_format_name = create_file_format(compression, compression_map, cursor)\n infer_schema_sql = f\"SELECT COLUMN_NAME, TYPE FROM table(infer_schema(location=>'@\\\"{stage_name}\\\"', file_format=>'{file_format_name}'))\"\n logger.debug(f\"inferring schema with '{infer_schema_sql}'\")\n result_cursor = cursor.execute(infer_schema_sql, _is_internal=True)\n if result_cursor is None:\n raise SnowflakeQueryUnknownError(infer_schema_sql)\n result = cast(List[Tuple[str, str]], result_cursor.fetchall())\n column_type_mapping: Dict[str, str] = dict(result)\n # Infer schema can return the columns out of order depending on the chunking we do when uploading\n # so we have to iterate through the dataframe columns to make sure we create the table with its\n # columns in order\n quote = '\"' if quote_identifiers else \"\"\n create_table_columns = \", \".join(\n [f\"{quote}{c}{quote} {column_type_mapping[c]}\" for c in df.columns]\n )\n create_table_sql = (\n f\"CREATE {'TEMP ' if create_temp_table else ''}TABLE IF NOT EXISTS {location} \"\n f\"({create_table_columns})\"\n f\" /* Python:snowflake.connector.pandas_tools.write_pandas() */ \"\n )\n logger.debug(f\"auto creating table with '{create_table_sql}'\")\n cursor.execute(create_table_sql, _is_internal=True)\n drop_file_format_sql = f\"DROP FILE FORMAT IF EXISTS {file_format_name}\"\n logger.debug(f\"dropping file format with '{drop_file_format_sql}'\")\n cursor.execute(drop_file_format_sql, 
_is_internal=True)\n\n # in Snowflake, all parquet data is stored in a single column, $1, so we must select columns explicitly\n # see (https://docs.snowflake.com/en/user-guide/script-data-load-transform-parquet.html)\n if quote_identifiers:\n parquet_columns = \",\".join(\n f'TO_BINARY($1:\"{c}\")'\n if c in [\"entity_feature_key\", \"entity_key\", \"value\"]\n else f'$1:\"{c}\"'\n for c in df.columns\n )\n else:\n parquet_columns = \",\".join(\n f\"TO_BINARY($1:{c})\"\n if c in [\"entity_feature_key\", \"entity_key\", \"value\"]\n else f\"$1:{c}\"\n for c in df.columns\n )\n\n copy_into_sql = (\n \"COPY INTO {location} /* Python:snowflake.connector.pandas_tools.write_pandas() */ \"\n \"({columns}) \"\n 'FROM (SELECT {parquet_columns} FROM @\"{stage_name}\") '\n \"FILE_FORMAT=(TYPE=PARQUET COMPRESSION={compression} BINARY_AS_TEXT = FALSE) \"\n \"PURGE=TRUE ON_ERROR={on_error}\"\n ).format(\n location=location,\n columns=columns,\n parquet_columns=parquet_columns,\n stage_name=stage_name,\n compression=compression_map[compression],\n on_error=on_error,\n )\n logger.debug(\"copying into with '{}'\".format(copy_into_sql))\n # Snowflake returns the original cursor if the query execution succeeded.\n result_cursor = cursor.execute(copy_into_sql, _is_internal=True)\n if result_cursor is None:\n raise SnowflakeQueryUnknownError(copy_into_sql)\n result_cursor.close()\n",
"path": "sdk/python/feast/infra/utils/snowflake_utils.py"
}
] | [
{
"content": "import configparser\nimport os\nimport random\nimport string\nfrom logging import getLogger\nfrom pathlib import Path\nfrom tempfile import TemporaryDirectory\nfrom typing import Any, Dict, Iterator, List, Optional, Tuple, cast\n\nimport pandas as pd\nimport pyarrow\nfrom cryptography.hazmat.backends import default_backend\nfrom cryptography.hazmat.primitives import serialization\nfrom tenacity import (\n retry,\n retry_if_exception_type,\n stop_after_attempt,\n wait_exponential,\n)\n\nfrom feast.errors import SnowflakeIncompleteConfig, SnowflakeQueryUnknownError\n\ntry:\n import snowflake.connector\n from snowflake.connector import ProgrammingError, SnowflakeConnection\n from snowflake.connector.cursor import SnowflakeCursor\nexcept ImportError as e:\n from feast.errors import FeastExtrasDependencyImportError\n\n raise FeastExtrasDependencyImportError(\"snowflake\", str(e))\n\n\ngetLogger(\"snowflake.connector.cursor\").disabled = True\ngetLogger(\"snowflake.connector.connection\").disabled = True\ngetLogger(\"snowflake.connector.network\").disabled = True\nlogger = getLogger(__name__)\n\n\ndef execute_snowflake_statement(conn: SnowflakeConnection, query) -> SnowflakeCursor:\n cursor = conn.cursor().execute(query)\n if cursor is None:\n raise SnowflakeQueryUnknownError(query)\n return cursor\n\n\ndef get_snowflake_conn(config, autocommit=True) -> SnowflakeConnection:\n assert config.type in [\"snowflake.offline\", \"snowflake.online\"]\n\n if config.type == \"snowflake.offline\":\n config_header = \"connections.feast_offline_store\"\n elif config.type == \"snowflake.online\":\n config_header = \"connections.feast_online_store\"\n\n config_dict = dict(config)\n\n # read config file\n config_reader = configparser.ConfigParser()\n config_reader.read([config_dict[\"config_path\"]])\n kwargs: Dict[str, Any] = {}\n if config_reader.has_section(config_header):\n kwargs = dict(config_reader[config_header])\n\n if \"schema\" in kwargs:\n kwargs[\"schema_\"] = kwargs.pop(\"schema\")\n\n kwargs.update((k, v) for k, v in config_dict.items() if v is not None)\n\n for k, v in kwargs.items():\n if k in [\"role\", \"warehouse\", \"database\", \"schema_\"]:\n kwargs[k] = f'\"{v}\"'\n\n if \"schema_\" in kwargs:\n kwargs[\"schema\"] = kwargs.pop(\"schema_\")\n else:\n kwargs[\"schema\"] = '\"PUBLIC\"'\n\n # https://docs.snowflake.com/en/user-guide/python-connector-example.html#using-key-pair-authentication-key-pair-rotation\n # https://docs.snowflake.com/en/user-guide/key-pair-auth.html#configuring-key-pair-authentication\n if \"private_key\" in kwargs:\n kwargs[\"private_key\"] = parse_private_key_path(\n kwargs[\"private_key\"], kwargs[\"private_key_passphrase\"]\n )\n\n try:\n conn = snowflake.connector.connect(\n application=\"feast\",\n autocommit=autocommit,\n **kwargs,\n )\n\n conn.cursor().execute(\"ALTER SESSION SET TIMEZONE = 'UTC'\", _is_internal=True)\n\n return conn\n except KeyError as e:\n raise SnowflakeIncompleteConfig(e)\n\n\n# TO DO -- sfc-gh-madkins\n# Remove dependency on write_pandas function by falling back to native snowflake python connector\n# Current issue is datetime[ns] types are read incorrectly in Snowflake, need to coerce to datetime[ns, UTC]\ndef write_pandas(\n conn: SnowflakeConnection,\n df: pd.DataFrame,\n table_name: str,\n database: Optional[str] = None,\n schema: Optional[str] = None,\n chunk_size: Optional[int] = None,\n compression: str = \"gzip\",\n on_error: str = \"abort_statement\",\n parallel: int = 4,\n quote_identifiers: bool = True,\n 
auto_create_table: bool = False,\n create_temp_table: bool = False,\n):\n \"\"\"Allows users to most efficiently write back a pandas DataFrame to Snowflake.\n\n It works by dumping the DataFrame into Parquet files, uploading them and finally copying their data into the table.\n\n Returns whether all files were ingested correctly, number of chunks uploaded, and number of rows ingested\n with all of the COPY INTO command's output for debugging purposes.\n\n Example usage:\n import pandas\n from snowflake.connector.pandas_tools import write_pandas\n\n df = pandas.DataFrame([('Mark', 10), ('Luke', 20)], columns=['name', 'balance'])\n success, nchunks, nrows, _ = write_pandas(cnx, df, 'customers')\n\n Args:\n conn: Connection to be used to communicate with Snowflake.\n df: Dataframe we'd like to write back.\n table_name: Table name where we want to insert into.\n database: Database table is in, if not provided the connection one will be used.\n schema: Schema table is in, if not provided the connection one will be used.\n chunk_size: Number of elements to be inserted once, if not provided all elements will be dumped once\n (Default value = None).\n compression: The compression used on the Parquet files, can only be gzip, or snappy. Gzip gives supposedly a\n better compression, while snappy is faster. Use whichever is more appropriate (Default value = 'gzip').\n on_error: Action to take when COPY INTO statements fail, default follows documentation at:\n https://docs.snowflake.com/en/sql-reference/sql/copy-into-table.html#copy-options-copyoptions\n (Default value = 'abort_statement').\n parallel: Number of threads to be used when uploading chunks, default follows documentation at:\n https://docs.snowflake.com/en/sql-reference/sql/put.html#optional-parameters (Default value = 4).\n quote_identifiers: By default, identifiers, specifically database, schema, table and column names\n (from df.columns) will be quoted. If set to False, identifiers are passed on to Snowflake without quoting.\n I.e. identifiers will be coerced to uppercase by Snowflake. (Default value = True)\n auto_create_table: When true, will automatically create a table with corresponding columns for each column in\n the passed in DataFrame. 
The table will not be created if it already exists\n create_temp_table: Will make the auto-created table as a temporary table\n \"\"\"\n\n cursor: SnowflakeCursor = conn.cursor()\n stage_name = create_temporary_sfc_stage(cursor)\n\n upload_df(df, cursor, stage_name, chunk_size, parallel, compression)\n copy_uploaded_data_to_table(\n cursor,\n stage_name,\n list(df.columns),\n table_name,\n database,\n schema,\n compression,\n on_error,\n quote_identifiers,\n auto_create_table,\n create_temp_table,\n )\n\n\ndef write_parquet(\n conn: SnowflakeConnection,\n path: Path,\n dataset_schema: pyarrow.Schema,\n table_name: str,\n database: Optional[str] = None,\n schema: Optional[str] = None,\n compression: str = \"gzip\",\n on_error: str = \"abort_statement\",\n parallel: int = 4,\n quote_identifiers: bool = True,\n auto_create_table: bool = False,\n create_temp_table: bool = False,\n):\n cursor: SnowflakeCursor = conn.cursor()\n stage_name = create_temporary_sfc_stage(cursor)\n\n columns = [field.name for field in dataset_schema]\n upload_local_pq(path, cursor, stage_name, parallel)\n copy_uploaded_data_to_table(\n cursor,\n stage_name,\n columns,\n table_name,\n database,\n schema,\n compression,\n on_error,\n quote_identifiers,\n auto_create_table,\n create_temp_table,\n )\n\n\ndef copy_uploaded_data_to_table(\n cursor: SnowflakeCursor,\n stage_name: str,\n columns: List[str],\n table_name: str,\n database: Optional[str] = None,\n schema: Optional[str] = None,\n compression: str = \"gzip\",\n on_error: str = \"abort_statement\",\n quote_identifiers: bool = True,\n auto_create_table: bool = False,\n create_temp_table: bool = False,\n):\n if database is not None and schema is None:\n raise ProgrammingError(\n \"Schema has to be provided to write_pandas when a database is provided\"\n )\n # This dictionary maps the compression algorithm to Snowflake put copy into command type\n # https://docs.snowflake.com/en/sql-reference/sql/copy-into-table.html#type-parquet\n compression_map = {\"gzip\": \"auto\", \"snappy\": \"snappy\"}\n if compression not in compression_map.keys():\n raise ProgrammingError(\n \"Invalid compression '{}', only acceptable values are: {}\".format(\n compression, compression_map.keys()\n )\n )\n if quote_identifiers:\n location = (\n (('\"' + database + '\".') if database else \"\")\n + (('\"' + schema + '\".') if schema else \"\")\n + ('\"' + table_name + '\"')\n )\n else:\n location = (\n (database + \".\" if database else \"\")\n + (schema + \".\" if schema else \"\")\n + (table_name)\n )\n\n if quote_identifiers:\n quoted_columns = '\"' + '\",\"'.join(columns) + '\"'\n else:\n quoted_columns = \",\".join(columns)\n\n if auto_create_table:\n file_format_name = create_file_format(compression, compression_map, cursor)\n infer_schema_sql = f\"SELECT COLUMN_NAME, TYPE FROM table(infer_schema(location=>'@\\\"{stage_name}\\\"', file_format=>'{file_format_name}'))\"\n logger.debug(f\"inferring schema with '{infer_schema_sql}'\")\n result_cursor = cursor.execute(infer_schema_sql, _is_internal=True)\n if result_cursor is None:\n raise SnowflakeQueryUnknownError(infer_schema_sql)\n result = cast(List[Tuple[str, str]], result_cursor.fetchall())\n column_type_mapping: Dict[str, str] = dict(result)\n # Infer schema can return the columns out of order depending on the chunking we do when uploading\n # so we have to iterate through the dataframe columns to make sure we create the table with its\n # columns in order\n quote = '\"' if quote_identifiers else \"\"\n create_table_columns = \", 
\".join(\n [f\"{quote}{c}{quote} {column_type_mapping[c]}\" for c in columns]\n )\n create_table_sql = (\n f\"CREATE {'TEMP ' if create_temp_table else ''}TABLE IF NOT EXISTS {location} \"\n f\"({create_table_columns})\"\n f\" /* Python:snowflake.connector.pandas_tools.write_pandas() */ \"\n )\n logger.debug(f\"auto creating table with '{create_table_sql}'\")\n cursor.execute(create_table_sql, _is_internal=True)\n drop_file_format_sql = f\"DROP FILE FORMAT IF EXISTS {file_format_name}\"\n logger.debug(f\"dropping file format with '{drop_file_format_sql}'\")\n cursor.execute(drop_file_format_sql, _is_internal=True)\n\n # in Snowflake, all parquet data is stored in a single column, $1, so we must select columns explicitly\n # see (https://docs.snowflake.com/en/user-guide/script-data-load-transform-parquet.html)\n if quote_identifiers:\n parquet_columns = \"$1:\" + \",$1:\".join(f'\"{c}\"' for c in columns)\n else:\n parquet_columns = \"$1:\" + \",$1:\".join(columns)\n copy_into_sql = (\n \"COPY INTO {location} /* Python:snowflake.connector.pandas_tools.write_pandas() */ \"\n \"({columns}) \"\n 'FROM (SELECT {parquet_columns} FROM @\"{stage_name}\") '\n \"FILE_FORMAT=(TYPE=PARQUET COMPRESSION={compression}) \"\n \"PURGE=TRUE ON_ERROR={on_error}\"\n ).format(\n location=location,\n columns=quoted_columns,\n parquet_columns=parquet_columns,\n stage_name=stage_name,\n compression=compression_map[compression],\n on_error=on_error,\n )\n logger.debug(\"copying into with '{}'\".format(copy_into_sql))\n # Snowflake returns the original cursor if the query execution succeeded.\n result_cursor = cursor.execute(copy_into_sql, _is_internal=True)\n if result_cursor is None:\n raise SnowflakeQueryUnknownError(copy_into_sql)\n result_cursor.close()\n\n\ndef upload_df(\n df: pd.DataFrame,\n cursor: SnowflakeCursor,\n stage_name: str,\n chunk_size: Optional[int] = None,\n parallel: int = 4,\n compression: str = \"gzip\",\n):\n \"\"\"\n Args:\n df: Dataframe we'd like to write back.\n cursor: cursor to be used to communicate with Snowflake.\n stage_name: stage name in Snowflake connection.\n chunk_size: Number of elements to be inserted once, if not provided all elements will be dumped once\n (Default value = None).\n parallel: Number of threads to be used when uploading chunks, default follows documentation at:\n https://docs.snowflake.com/en/sql-reference/sql/put.html#optional-parameters (Default value = 4).\n compression: The compression used on the Parquet files, can only be gzip, or snappy. Gzip gives supposedly a\n better compression, while snappy is faster. 
Use whichever is more appropriate (Default value = 'gzip').\n\n \"\"\"\n if chunk_size is None:\n chunk_size = len(df)\n\n with TemporaryDirectory() as tmp_folder:\n for i, chunk in chunk_helper(df, chunk_size):\n chunk_path = os.path.join(tmp_folder, \"file{}.txt\".format(i))\n # Dump chunk into parquet file\n chunk.to_parquet(\n chunk_path,\n compression=compression,\n use_deprecated_int96_timestamps=True,\n )\n # Upload parquet file\n upload_sql = (\n \"PUT /* Python:feast.infra.utils.snowflake_utils.upload_df() */ \"\n \"'file://{path}' @\\\"{stage_name}\\\" PARALLEL={parallel}\"\n ).format(\n path=chunk_path.replace(\"\\\\\", \"\\\\\\\\\").replace(\"'\", \"\\\\'\"),\n stage_name=stage_name,\n parallel=parallel,\n )\n logger.debug(f\"uploading files with '{upload_sql}'\")\n cursor.execute(upload_sql, _is_internal=True)\n # Remove chunk file\n os.remove(chunk_path)\n\n\ndef upload_local_pq(\n path: Path,\n cursor: SnowflakeCursor,\n stage_name: str,\n parallel: int = 4,\n):\n \"\"\"\n Args:\n path: Path to parquet dataset on disk\n cursor: cursor to be used to communicate with Snowflake.\n stage_name: stage name in Snowflake connection.\n parallel: Number of threads to be used when uploading chunks, default follows documentation at:\n https://docs.snowflake.com/en/sql-reference/sql/put.html#optional-parameters (Default value = 4).\n \"\"\"\n for file in path.iterdir():\n upload_sql = (\n \"PUT /* Python:feast.infra.utils.snowflake_utils.upload_local_pq() */ \"\n \"'file://{path}' @\\\"{stage_name}\\\" PARALLEL={parallel}\"\n ).format(\n path=str(file).replace(\"\\\\\", \"\\\\\\\\\").replace(\"'\", \"\\\\'\"),\n stage_name=stage_name,\n parallel=parallel,\n )\n logger.debug(f\"uploading files with '{upload_sql}'\")\n cursor.execute(upload_sql, _is_internal=True)\n\n\n@retry(\n wait=wait_exponential(multiplier=1, max=4),\n retry=retry_if_exception_type(ProgrammingError),\n stop=stop_after_attempt(5),\n reraise=True,\n)\ndef create_file_format(\n compression: str, compression_map: Dict[str, str], cursor: SnowflakeCursor\n) -> str:\n file_format_name = (\n '\"' + \"\".join(random.choice(string.ascii_lowercase) for _ in range(5)) + '\"'\n )\n file_format_sql = (\n f\"CREATE FILE FORMAT {file_format_name} \"\n f\"/* Python:snowflake.connector.pandas_tools.write_pandas() */ \"\n f\"TYPE=PARQUET COMPRESSION={compression_map[compression]}\"\n )\n logger.debug(f\"creating file format with '{file_format_sql}'\")\n cursor.execute(file_format_sql, _is_internal=True)\n return file_format_name\n\n\n@retry(\n wait=wait_exponential(multiplier=1, max=4),\n retry=retry_if_exception_type(ProgrammingError),\n stop=stop_after_attempt(5),\n reraise=True,\n)\ndef create_temporary_sfc_stage(cursor: SnowflakeCursor) -> str:\n stage_name = \"\".join(random.choice(string.ascii_lowercase) for _ in range(5))\n create_stage_sql = (\n \"create temporary stage /* Python:snowflake.connector.pandas_tools.write_pandas() */ \"\n '\"{stage_name}\"'\n ).format(stage_name=stage_name)\n logger.debug(f\"creating stage with '{create_stage_sql}'\")\n result_cursor = cursor.execute(create_stage_sql, _is_internal=True)\n if result_cursor is None:\n raise SnowflakeQueryUnknownError(create_stage_sql)\n result_cursor.fetchall()\n return stage_name\n\n\ndef chunk_helper(lst: pd.DataFrame, n: int) -> Iterator[Tuple[int, pd.DataFrame]]:\n \"\"\"Helper generator to chunk a sequence efficiently with current index like if enumerate was called on sequence.\"\"\"\n for i in range(0, len(lst), n):\n yield int(i / n), lst[i : i + n]\n\n\ndef 
parse_private_key_path(key_path: str, private_key_passphrase: str) -> bytes:\n\n with open(key_path, \"rb\") as key:\n p_key = serialization.load_pem_private_key(\n key.read(),\n password=private_key_passphrase.encode(),\n backend=default_backend(),\n )\n\n pkb = p_key.private_bytes(\n encoding=serialization.Encoding.DER,\n format=serialization.PrivateFormat.PKCS8,\n encryption_algorithm=serialization.NoEncryption(),\n )\n\n return pkb\n\n\ndef write_pandas_binary(\n conn: SnowflakeConnection,\n df: pd.DataFrame,\n table_name: str,\n database: Optional[str] = None,\n schema: Optional[str] = None,\n chunk_size: Optional[int] = None,\n compression: str = \"gzip\",\n on_error: str = \"abort_statement\",\n parallel: int = 4,\n quote_identifiers: bool = True,\n auto_create_table: bool = False,\n create_temp_table: bool = False,\n):\n \"\"\"Allows users to most efficiently write back a pandas DataFrame to Snowflake.\n\n It works by dumping the DataFrame into Parquet files, uploading them and finally copying their data into the table.\n\n Returns whether all files were ingested correctly, number of chunks uploaded, and number of rows ingested\n with all of the COPY INTO command's output for debugging purposes.\n\n Example usage:\n import pandas\n from snowflake.connector.pandas_tools import write_pandas\n\n df = pandas.DataFrame([('Mark', 10), ('Luke', 20)], columns=['name', 'balance'])\n success, nchunks, nrows, _ = write_pandas(cnx, df, 'customers')\n\n Args:\n conn: Connection to be used to communicate with Snowflake.\n df: Dataframe we'd like to write back.\n table_name: Table name where we want to insert into.\n database: Database table is in, if not provided the connection one will be used.\n schema: Schema table is in, if not provided the connection one will be used.\n chunk_size: Number of elements to be inserted once, if not provided all elements will be dumped once\n (Default value = None).\n compression: The compression used on the Parquet files, can only be gzip, or snappy. Gzip gives supposedly a\n better compression, while snappy is faster. Use whichever is more appropriate (Default value = 'gzip').\n on_error: Action to take when COPY INTO statements fail, default follows documentation at:\n https://docs.snowflake.com/en/sql-reference/sql/copy-into-table.html#copy-options-copyoptions\n (Default value = 'abort_statement').\n parallel: Number of threads to be used when uploading chunks, default follows documentation at:\n https://docs.snowflake.com/en/sql-reference/sql/put.html#optional-parameters (Default value = 4).\n quote_identifiers: By default, identifiers, specifically database, schema, table and column names\n (from df.columns) will be quoted. If set to False, identifiers are passed on to Snowflake without quoting.\n I.e. identifiers will be coerced to uppercase by Snowflake. (Default value = True)\n auto_create_table: When true, will automatically create a table with corresponding columns for each column in\n the passed in DataFrame. 
The table will not be created if it already exists\n create_temp_table: Will make the auto-created table as a temporary table\n \"\"\"\n if database is not None and schema is None:\n raise ProgrammingError(\n \"Schema has to be provided to write_pandas when a database is provided\"\n )\n # This dictionary maps the compression algorithm to Snowflake put copy into command type\n # https://docs.snowflake.com/en/sql-reference/sql/copy-into-table.html#type-parquet\n compression_map = {\"gzip\": \"auto\", \"snappy\": \"snappy\"}\n if compression not in compression_map.keys():\n raise ProgrammingError(\n \"Invalid compression '{}', only acceptable values are: {}\".format(\n compression, compression_map.keys()\n )\n )\n if quote_identifiers:\n location = (\n (('\"' + database + '\".') if database else \"\")\n + (('\"' + schema + '\".') if schema else \"\")\n + ('\"' + table_name + '\"')\n )\n else:\n location = (\n (database + \".\" if database else \"\")\n + (schema + \".\" if schema else \"\")\n + (table_name)\n )\n if chunk_size is None:\n chunk_size = len(df)\n cursor: SnowflakeCursor = conn.cursor()\n stage_name = create_temporary_sfc_stage(cursor)\n\n with TemporaryDirectory() as tmp_folder:\n for i, chunk in chunk_helper(df, chunk_size):\n chunk_path = os.path.join(tmp_folder, \"file{}.txt\".format(i))\n # Dump chunk into parquet file\n chunk.to_parquet(\n chunk_path,\n compression=compression,\n use_deprecated_int96_timestamps=True,\n )\n # Upload parquet file\n upload_sql = (\n \"PUT /* Python:snowflake.connector.pandas_tools.write_pandas() */ \"\n \"'file://{path}' @\\\"{stage_name}\\\" PARALLEL={parallel}\"\n ).format(\n path=chunk_path.replace(\"\\\\\", \"\\\\\\\\\").replace(\"'\", \"\\\\'\"),\n stage_name=stage_name,\n parallel=parallel,\n )\n logger.debug(f\"uploading files with '{upload_sql}'\")\n cursor.execute(upload_sql, _is_internal=True)\n # Remove chunk file\n os.remove(chunk_path)\n if quote_identifiers:\n columns = '\"' + '\",\"'.join(list(df.columns)) + '\"'\n else:\n columns = \",\".join(list(df.columns))\n\n if auto_create_table:\n file_format_name = create_file_format(compression, compression_map, cursor)\n infer_schema_sql = f\"SELECT COLUMN_NAME, TYPE FROM table(infer_schema(location=>'@\\\"{stage_name}\\\"', file_format=>'{file_format_name}'))\"\n logger.debug(f\"inferring schema with '{infer_schema_sql}'\")\n result_cursor = cursor.execute(infer_schema_sql, _is_internal=True)\n if result_cursor is None:\n raise SnowflakeQueryUnknownError(infer_schema_sql)\n result = cast(List[Tuple[str, str]], result_cursor.fetchall())\n column_type_mapping: Dict[str, str] = dict(result)\n # Infer schema can return the columns out of order depending on the chunking we do when uploading\n # so we have to iterate through the dataframe columns to make sure we create the table with its\n # columns in order\n quote = '\"' if quote_identifiers else \"\"\n create_table_columns = \", \".join(\n [f\"{quote}{c}{quote} {column_type_mapping[c]}\" for c in df.columns]\n )\n create_table_sql = (\n f\"CREATE {'TEMP ' if create_temp_table else ''}TABLE IF NOT EXISTS {location} \"\n f\"({create_table_columns})\"\n f\" /* Python:snowflake.connector.pandas_tools.write_pandas() */ \"\n )\n logger.debug(f\"auto creating table with '{create_table_sql}'\")\n cursor.execute(create_table_sql, _is_internal=True)\n drop_file_format_sql = f\"DROP FILE FORMAT IF EXISTS {file_format_name}\"\n logger.debug(f\"dropping file format with '{drop_file_format_sql}'\")\n cursor.execute(drop_file_format_sql, 
_is_internal=True)\n\n # in Snowflake, all parquet data is stored in a single column, $1, so we must select columns explicitly\n # see (https://docs.snowflake.com/en/user-guide/script-data-load-transform-parquet.html)\n if quote_identifiers:\n parquet_columns = \",\".join(\n f'TO_BINARY($1:\"{c}\")'\n if c in [\"entity_feature_key\", \"entity_key\", \"value\"]\n else f'$1:\"{c}\"'\n for c in df.columns\n )\n else:\n parquet_columns = \",\".join(\n f\"TO_BINARY($1:{c})\"\n if c in [\"entity_feature_key\", \"entity_key\", \"value\"]\n else f\"$1:{c}\"\n for c in df.columns\n )\n\n copy_into_sql = (\n \"COPY INTO {location} /* Python:snowflake.connector.pandas_tools.write_pandas() */ \"\n \"({columns}) \"\n 'FROM (SELECT {parquet_columns} FROM @\"{stage_name}\") '\n \"FILE_FORMAT=(TYPE=PARQUET COMPRESSION={compression} BINARY_AS_TEXT = FALSE) \"\n \"PURGE=TRUE ON_ERROR={on_error}\"\n ).format(\n location=location,\n columns=columns,\n parquet_columns=parquet_columns,\n stage_name=stage_name,\n compression=compression_map[compression],\n on_error=on_error,\n )\n logger.debug(\"copying into with '{}'\".format(copy_into_sql))\n # Snowflake returns the original cursor if the query execution succeeded.\n result_cursor = cursor.execute(copy_into_sql, _is_internal=True)\n if result_cursor is None:\n raise SnowflakeQueryUnknownError(copy_into_sql)\n result_cursor.close()\n",
"path": "sdk/python/feast/infra/utils/snowflake_utils.py"
}
] | diff --git a/sdk/python/feast/infra/utils/snowflake_utils.py b/sdk/python/feast/infra/utils/snowflake_utils.py
index 6ba9e544894..6b3500b605e 100644
--- a/sdk/python/feast/infra/utils/snowflake_utils.py
+++ b/sdk/python/feast/infra/utils/snowflake_utils.py
@@ -88,6 +88,8 @@ def get_snowflake_conn(config, autocommit=True) -> SnowflakeConnection:
**kwargs,
)
+ conn.cursor().execute("ALTER SESSION SET TIMEZONE = 'UTC'", _is_internal=True)
+
return conn
except KeyError as e:
raise SnowflakeIncompleteConfig(e)
|
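For context on the patch above: the fix pins the Snowflake session timezone to UTC immediately after connecting, which keeps pandas `datetime64[ns]` timestamps from being re-interpreted in the server's local zone (the problem flagged in the `write_pandas` TODO). A minimal sketch of the same session setting in isolation, with hypothetical connection parameters:

```python
import snowflake.connector

# Hypothetical credentials for illustration only; substitute real account details.
conn = snowflake.connector.connect(
    account="my_account",
    user="my_user",
    password="my_password",
    application="feast",
)

# Mirror the one-line fix above: force the session timezone to UTC so
# timestamp values round-trip consistently regardless of server defaults.
conn.cursor().execute("ALTER SESSION SET TIMEZONE = 'UTC'")
```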
translate__translate-3603 | po2ts fails with ascii encode error on py2 (should use utf-8)
Test file:
[octave.zip](https://github.com/translate/translate/files/870288/octave.zip)
```
$ po2ts octave.po oct.ts
processing 1 files...
po2ts: WARNING: Error processing: input octave.po, output oct.ts, template None: 'ascii' codec can't encode characters in position 187-188: ordinal not in range(128)
[###########################################] 100%
$ python --version
Python 2.7.12
```
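The warning is the classic Python 2 implicit-encode failure: `convertstore()` returns a unicode string, and writing it to a byte-oriented output file makes Python 2 encode it as ASCII, which fails on the non-ASCII characters in the translations. A minimal sketch of the failure mode and of the fix applied in the patch below (encoding explicitly to UTF-8 before writing):

```python
# -*- coding: utf-8 -*-
# Stand-in for the unicode XML string that convertstore() returns.
outputstring = u"<translation>†wee</translation>"

with open("oct.ts", "wb") as outputfile:
    # On Python 2, outputfile.write(outputstring) raises UnicodeEncodeError
    # ("'ascii' codec can't encode ..."); encoding explicitly avoids it.
    outputfile.write(outputstring.encode("utf-8"))
```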
| [
{
"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright 2004-2006 Zuza Software Foundation\n#\n# This file is part of translate.\n#\n# translate is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n#\n# translate is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, see <http://www.gnu.org/licenses/>.\n\n\"\"\"Convert Gettext PO localization files to Qt Linguist (.ts) files.\n\nSee: http://docs.translatehouse.org/projects/translate-toolkit/en/latest/commands/ts2po.html\nfor examples and usage instructions.\n\"\"\"\n\nfrom translate.storage import po, ts\n\n\nclass po2ts(object):\n\n def convertstore(self, inputstore, templatefile=None, context=None):\n \"\"\"converts a .po file to .ts format (using a template .ts file if given)\"\"\"\n if templatefile is None:\n tsfile = ts.QtTsParser()\n else:\n tsfile = ts.QtTsParser(templatefile)\n for inputunit in inputstore.units:\n if inputunit.isheader() or inputunit.isblank():\n continue\n source = inputunit.source\n translation = inputunit.target\n comment = inputunit.getnotes(\"translator\")\n transtype = None\n if not inputunit.istranslated():\n transtype = \"unfinished\"\n elif inputunit.getnotes(\"developer\") == \"(obsolete)\":\n transtype = \"obsolete\"\n if isinstance(source, bytes):\n source = source.decode(\"utf-8\")\n if isinstance(translation, bytes):\n translation = translation.decode(\"utf-8\")\n for sourcelocation in inputunit.getlocations():\n if context is None:\n if \"#\" in sourcelocation:\n contextname = sourcelocation[:sourcelocation.find(\"#\")]\n else:\n contextname = sourcelocation\n else:\n contextname = context\n tsfile.addtranslation(contextname, source, translation, comment, transtype, createifmissing=True)\n return tsfile.getxml()\n\n\ndef convertpo(inputfile, outputfile, templatefile, context):\n \"\"\"reads in stdin using fromfileclass, converts using convertorclass, writes to stdout\"\"\"\n inputstore = po.pofile(inputfile)\n if inputstore.isempty():\n return 0\n convertor = po2ts()\n outputstring = convertor.convertstore(inputstore, templatefile, context)\n outputfile.write(outputstring)\n return 1\n\n\ndef main(argv=None):\n from translate.convert import convert\n formats = {\"po\": (\"ts\", convertpo), (\"po\", \"ts\"): (\"ts\", convertpo)}\n parser = convert.ConvertOptionParser(formats, usepots=False, usetemplates=True, description=__doc__)\n parser.add_option(\"-c\", \"--context\", dest=\"context\", default=None,\n help=\"use supplied context instead of the one in the .po file comment\")\n parser.passthrough.append(\"context\")\n parser.run(argv)\n\n\nif __name__ == '__main__':\n main()\n",
"path": "translate/convert/po2ts.py"
}
] | [
{
"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright 2004-2006 Zuza Software Foundation\n#\n# This file is part of translate.\n#\n# translate is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n#\n# translate is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, see <http://www.gnu.org/licenses/>.\n\n\"\"\"Convert Gettext PO localization files to Qt Linguist (.ts) files.\n\nSee: http://docs.translatehouse.org/projects/translate-toolkit/en/latest/commands/ts2po.html\nfor examples and usage instructions.\n\"\"\"\n\nfrom translate.storage import po, ts\n\n\nclass po2ts(object):\n\n def convertstore(self, inputstore, templatefile=None, context=None):\n \"\"\"converts a .po file to .ts format (using a template .ts file if given)\"\"\"\n if templatefile is None:\n tsfile = ts.QtTsParser()\n else:\n tsfile = ts.QtTsParser(templatefile)\n for inputunit in inputstore.units:\n if inputunit.isheader() or inputunit.isblank():\n continue\n source = inputunit.source\n translation = inputunit.target\n comment = inputunit.getnotes(\"translator\")\n transtype = None\n if not inputunit.istranslated():\n transtype = \"unfinished\"\n elif inputunit.getnotes(\"developer\") == \"(obsolete)\":\n transtype = \"obsolete\"\n if isinstance(source, bytes):\n source = source.decode(\"utf-8\")\n if isinstance(translation, bytes):\n translation = translation.decode(\"utf-8\")\n for sourcelocation in inputunit.getlocations():\n if context is None:\n if \"#\" in sourcelocation:\n contextname = sourcelocation[:sourcelocation.find(\"#\")]\n else:\n contextname = sourcelocation\n else:\n contextname = context\n tsfile.addtranslation(contextname, source, translation, comment, transtype, createifmissing=True)\n return tsfile.getxml()\n\n\ndef convertpo(inputfile, outputfile, templatefile, context):\n \"\"\"reads in stdin using fromfileclass, converts using convertorclass, writes to stdout\"\"\"\n inputstore = po.pofile(inputfile)\n if inputstore.isempty():\n return 0\n convertor = po2ts()\n outputstring = convertor.convertstore(inputstore, templatefile, context)\n outputfile.write(outputstring.encode('utf-8'))\n return 1\n\n\ndef main(argv=None):\n from translate.convert import convert\n formats = {\"po\": (\"ts\", convertpo), (\"po\", \"ts\"): (\"ts\", convertpo)}\n parser = convert.ConvertOptionParser(formats, usepots=False, usetemplates=True, description=__doc__)\n parser.add_option(\"-c\", \"--context\", dest=\"context\", default=None,\n help=\"use supplied context instead of the one in the .po file comment\")\n parser.passthrough.append(\"context\")\n parser.run(argv)\n\n\nif __name__ == '__main__':\n main()\n",
"path": "translate/convert/po2ts.py"
}
] | diff --git a/tests/cli/data/test_po2ts/one.po b/tests/cli/data/test_po2ts/one.po
new file mode 100644
index 0000000000..0be70c8135
--- /dev/null
+++ b/tests/cli/data/test_po2ts/one.po
@@ -0,0 +1,7 @@
+#: simple.cpp
+msgid "One"
+msgstr "Een"
+
+#: unicode.cpp
+msgid "†wo"
+msgstr "†wee"
diff --git a/tests/cli/data/test_po2ts/out.txt b/tests/cli/data/test_po2ts/out.txt
new file mode 100644
index 0000000000..9a08463ffe
--- /dev/null
+++ b/tests/cli/data/test_po2ts/out.txt
@@ -0,0 +1,18 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!DOCTYPE TS>
+<TS>
+ <context>
+ <name>simple.cpp</name>
+ <message>
+ <source>One</source>
+ <translation>Een</translation>
+ </message>
+ </context>
+ <context>
+ <name>unicode.cpp</name>
+ <message>
+ <source>†wo</source>
+ <translation>†wee</translation>
+ </message>
+ </context>
+</TS>
\ No newline at end of file
diff --git a/tests/cli/test_po2ts.sh b/tests/cli/test_po2ts.sh
new file mode 100755
index 0000000000..758c28f9ec
--- /dev/null
+++ b/tests/cli/test_po2ts.sh
@@ -0,0 +1,6 @@
+#!/bin/bash
+
+source $(dirname $0)/test.inc.sh
+
+po2ts --progress=none $one $out
+check_results
diff --git a/translate/convert/po2ts.py b/translate/convert/po2ts.py
index 36cf0cb416..979949ccc1 100755
--- a/translate/convert/po2ts.py
+++ b/translate/convert/po2ts.py
@@ -69,7 +69,7 @@ def convertpo(inputfile, outputfile, templatefile, context):
return 0
convertor = po2ts()
outputstring = convertor.convertstore(inputstore, templatefile, context)
- outputfile.write(outputstring)
+ outputfile.write(outputstring.encode('utf-8'))
return 1
diff --git a/translate/convert/test_po2ts.py b/translate/convert/test_po2ts.py
index 347f767ff1..0a5d1b3f0b 100644
--- a/translate/convert/test_po2ts.py
+++ b/translate/convert/test_po2ts.py
@@ -1,3 +1,5 @@
+# -*- coding: utf-8 -*-
+
from translate.convert import po2ts, test_convert
from translate.misc import wStringIO
from translate.storage import po
@@ -30,6 +32,18 @@ def test_simpleunit(self):
assert "<translation>asdf</translation>" in tsfile
assert "<comment>" not in tsfile
+ def test_simple_unicode_unit(self):
+ """checks that a simple unit with unicode strings"""
+ minipo = r'''#: unicode.cpp
+msgid "ßource"
+msgstr "†arget"'''
+ tsfile = self.po2ts(minipo)
+ print(tsfile)
+ print(type(tsfile))
+ assert u"<name>unicode.cpp</name>" in tsfile
+ assert u"<source>ßource</source>" in tsfile
+ assert u"<translation>†arget</translation>" in tsfile
+
def test_fullunit(self):
"""check that an entry with various settings is converted correctly"""
posource = '''# Translator comment
|
Textualize__rich-2642 | [BUG] export_html leaks style into the page
**Describe the bug**
Following discussion https://github.com/Textualize/rich/discussions/2640
Injecting HTML generated with Rich's `Console.export_html()` method seems to leak some style into the page.
The page is built with MkDocs + Material for MkDocs. The dark theme is activated.
The light theme is less impacted; I just see extra "bars" (like an empty code line) between block elements.
Code used to generate the HTML:
```python
import os
from rich.console import Console
report = """$ griffe check griffe -ssrc -b0.24.0 -a0.23.0
[bold]src/griffe/loader.py[/]:156: GriffeLoader.resolve_aliases([blue]only_exported[/]): [yellow]Parameter kind was changed[/]: positional or keyword -> keyword-only
etc.
"""
with open(os.devnull, "w") as devnull:
console = Console(record=True, width=150, file=devnull)
console.print(report, markup=True, highlight=False)
print(console.export_html(inline_styles=True))
```
Both `inline_styles=True` and `inline_styles=False` give the same result.
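The root cause is the nesting of the exporter's HTML template: `<code>` is an inline element, so wrapping a block-level `<pre>` inside it is invalid HTML, and browsers and site themes repair it in inconsistent, style-leaking ways. A minimal illustration of the markup before and after the fix (the actual template lives in `rich/_export_format.py`; see the diff below):

```html
<!-- Before: block-level <pre> nested inside inline <code> (invalid) -->
<code>
  <pre style="font-family:monospace">exported text</pre>
</code>

<!-- After: inline <code> nested inside block-level <pre> (valid) -->
<pre style="font-family:monospace">
  <code>exported text</code>
</pre>
```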
**Platform**
<details>
<summary>Click to expand</summary>
Linux
```console
$ python -m rich.diagnose
╭───────────────────────── <class 'rich.console.Console'> ─────────────────────────╮
│ A high level console interface. │
│ │
│ ╭──────────────────────────────────────────────────────────────────────────────╮ │
│ │ <console width=239 ColorSystem.TRUECOLOR> │ │
│ ╰──────────────────────────────────────────────────────────────────────────────╯ │
│ │
│ color_system = 'truecolor' │
│ encoding = 'utf-8' │
│ file = <_io.TextIOWrapper name='<stdout>' mode='w' encoding='utf-8'> │
│ height = 58 │
│ is_alt_screen = False │
│ is_dumb_terminal = False │
│ is_interactive = True │
│ is_jupyter = False │
│ is_terminal = True │
│ legacy_windows = False │
│ no_color = False │
│ options = ConsoleOptions( │
│ size=ConsoleDimensions(width=239, height=58), │
│ legacy_windows=False, │
│ min_width=1, │
│ max_width=239, │
│ is_terminal=True, │
│ encoding='utf-8', │
│ max_height=58, │
│ justify=None, │
│ overflow=None, │
│ no_wrap=False, │
│ highlight=None, │
│ markup=None, │
│ height=None │
│ ) │
│ quiet = False │
│ record = False │
│ safe_box = True │
│ size = ConsoleDimensions(width=239, height=58) │
│ soft_wrap = False │
│ stderr = False │
│ style = None │
│ tab_size = 8 │
│ width = 239 │
╰──────────────────────────────────────────────────────────────────────────────────╯
╭─── <class 'rich._windows.WindowsConsoleFeatures'> ────╮
│ Windows features available. │
│ │
│ ╭───────────────────────────────────────────────────╮ │
│ │ WindowsConsoleFeatures(vt=False, truecolor=False) │ │
│ ╰───────────────────────────────────────────────────╯ │
│ │
│ truecolor = False │
│ vt = False │
╰───────────────────────────────────────────────────────╯
╭────── Environment Variables ───────╮
│ { │
│ 'TERM': 'xterm-256color', │
│ 'COLORTERM': 'truecolor', │
│ 'CLICOLOR': None, │
│ 'NO_COLOR': None, │
│ 'TERM_PROGRAM': None, │
│ 'COLUMNS': None, │
│ 'LINES': None, │
│ 'JUPYTER_COLUMNS': None, │
│ 'JUPYTER_LINES': None, │
│ 'JPY_PARENT_PID': None, │
│ 'VSCODE_VERBOSE_LOGGING': None │
│ } │
╰────────────────────────────────────╯
platform="Linux"
```
```console
% pdm list --freeze | grep rich
rich==12.6.0
```
</details>
| [
{
"content": "CONSOLE_HTML_FORMAT = \"\"\"\\\n<!DOCTYPE html>\n<head>\n<meta charset=\"UTF-8\">\n<style>\n{stylesheet}\nbody {{\n color: {foreground};\n background-color: {background};\n}}\n</style>\n</head>\n<html>\n<body>\n <code>\n <pre style=\"font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace\">{code}</pre>\n </code>\n</body>\n</html>\n\"\"\"\n\nCONSOLE_SVG_FORMAT = \"\"\"\\\n<svg class=\"rich-terminal\" viewBox=\"0 0 {width} {height}\" xmlns=\"http://www.w3.org/2000/svg\">\n <!-- Generated with Rich https://www.textualize.io -->\n <style>\n\n @font-face {{\n font-family: \"Fira Code\";\n src: local(\"FiraCode-Regular\"),\n url(\"https://cdnjs.cloudflare.com/ajax/libs/firacode/6.2.0/woff2/FiraCode-Regular.woff2\") format(\"woff2\"),\n url(\"https://cdnjs.cloudflare.com/ajax/libs/firacode/6.2.0/woff/FiraCode-Regular.woff\") format(\"woff\");\n font-style: normal;\n font-weight: 400;\n }}\n @font-face {{\n font-family: \"Fira Code\";\n src: local(\"FiraCode-Bold\"),\n url(\"https://cdnjs.cloudflare.com/ajax/libs/firacode/6.2.0/woff2/FiraCode-Bold.woff2\") format(\"woff2\"),\n url(\"https://cdnjs.cloudflare.com/ajax/libs/firacode/6.2.0/woff/FiraCode-Bold.woff\") format(\"woff\");\n font-style: bold;\n font-weight: 700;\n }}\n\n .{unique_id}-matrix {{\n font-family: Fira Code, monospace;\n font-size: {char_height}px;\n line-height: {line_height}px;\n font-variant-east-asian: full-width;\n }}\n\n .{unique_id}-title {{\n font-size: 18px;\n font-weight: bold;\n font-family: arial;\n }}\n\n {styles}\n </style>\n\n <defs>\n <clipPath id=\"{unique_id}-clip-terminal\">\n <rect x=\"0\" y=\"0\" width=\"{terminal_width}\" height=\"{terminal_height}\" />\n </clipPath>\n {lines}\n </defs>\n\n {chrome}\n <g transform=\"translate({terminal_x}, {terminal_y})\" clip-path=\"url(#{unique_id}-clip-terminal)\">\n {backgrounds}\n <g class=\"{unique_id}-matrix\">\n {matrix}\n </g>\n </g>\n</svg>\n\"\"\"\n\n_SVG_FONT_FAMILY = \"Rich Fira Code\"\n_SVG_CLASSES_PREFIX = \"rich-svg\"\n",
"path": "rich/_export_format.py"
}
] | [
{
"content": "CONSOLE_HTML_FORMAT = \"\"\"\\\n<!DOCTYPE html>\n<head>\n<meta charset=\"UTF-8\">\n<style>\n{stylesheet}\nbody {{\n color: {foreground};\n background-color: {background};\n}}\n</style>\n</head>\n<html>\n<body>\n <pre style=\"font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace\">\n <code>{code}</code>\n </pre>\n</body>\n</html>\n\"\"\"\n\nCONSOLE_SVG_FORMAT = \"\"\"\\\n<svg class=\"rich-terminal\" viewBox=\"0 0 {width} {height}\" xmlns=\"http://www.w3.org/2000/svg\">\n <!-- Generated with Rich https://www.textualize.io -->\n <style>\n\n @font-face {{\n font-family: \"Fira Code\";\n src: local(\"FiraCode-Regular\"),\n url(\"https://cdnjs.cloudflare.com/ajax/libs/firacode/6.2.0/woff2/FiraCode-Regular.woff2\") format(\"woff2\"),\n url(\"https://cdnjs.cloudflare.com/ajax/libs/firacode/6.2.0/woff/FiraCode-Regular.woff\") format(\"woff\");\n font-style: normal;\n font-weight: 400;\n }}\n @font-face {{\n font-family: \"Fira Code\";\n src: local(\"FiraCode-Bold\"),\n url(\"https://cdnjs.cloudflare.com/ajax/libs/firacode/6.2.0/woff2/FiraCode-Bold.woff2\") format(\"woff2\"),\n url(\"https://cdnjs.cloudflare.com/ajax/libs/firacode/6.2.0/woff/FiraCode-Bold.woff\") format(\"woff\");\n font-style: bold;\n font-weight: 700;\n }}\n\n .{unique_id}-matrix {{\n font-family: Fira Code, monospace;\n font-size: {char_height}px;\n line-height: {line_height}px;\n font-variant-east-asian: full-width;\n }}\n\n .{unique_id}-title {{\n font-size: 18px;\n font-weight: bold;\n font-family: arial;\n }}\n\n {styles}\n </style>\n\n <defs>\n <clipPath id=\"{unique_id}-clip-terminal\">\n <rect x=\"0\" y=\"0\" width=\"{terminal_width}\" height=\"{terminal_height}\" />\n </clipPath>\n {lines}\n </defs>\n\n {chrome}\n <g transform=\"translate({terminal_x}, {terminal_y})\" clip-path=\"url(#{unique_id}-clip-terminal)\">\n {backgrounds}\n <g class=\"{unique_id}-matrix\">\n {matrix}\n </g>\n </g>\n</svg>\n\"\"\"\n\n_SVG_FONT_FAMILY = \"Rich Fira Code\"\n_SVG_CLASSES_PREFIX = \"rich-svg\"\n",
"path": "rich/_export_format.py"
}
] | diff --git a/CHANGELOG.md b/CHANGELOG.md
index 25eb9a908..1e391a37f 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -7,6 +7,10 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
## [13.0.0] - Unreleased
+### Fixed
+
+- Reversed `pre` and `code` tags in base HTML format https://github.com/Textualize/rich/pull/2642
+
### Changed
- Bumped minimum Python version to 3.7 https://github.com/Textualize/rich/pull/2567
diff --git a/rich/_export_format.py b/rich/_export_format.py
index b79c13069..ea4020904 100644
--- a/rich/_export_format.py
+++ b/rich/_export_format.py
@@ -12,9 +12,9 @@
</head>
<html>
<body>
- <code>
- <pre style="font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace">{code}</pre>
- </code>
+ <pre style="font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace">
+ <code>{code}</code>
+ </pre>
</body>
</html>
"""
diff --git a/tests/test_console.py b/tests/test_console.py
index e1aaaefd3..8c72e8da0 100644
--- a/tests/test_console.py
+++ b/tests/test_console.py
@@ -528,7 +528,7 @@ def test_export_html():
console = Console(record=True, width=100)
console.print("[b]foo <script> 'test' [link=https://example.org]Click[/link]")
html = console.export_html()
- expected = '<!DOCTYPE html>\n<head>\n<meta charset="UTF-8">\n<style>\n.r1 {font-weight: bold}\n.r2 {color: #ff00ff; text-decoration-color: #ff00ff; font-weight: bold}\n.r3 {color: #008000; text-decoration-color: #008000; font-weight: bold}\nbody {\n color: #000000;\n background-color: #ffffff;\n}\n</style>\n</head>\n<html>\n<body>\n <code>\n <pre style="font-family:Menlo,\'DejaVu Sans Mono\',consolas,\'Courier New\',monospace"><span class="r1">foo <</span><span class="r2">script</span><span class="r1">> </span><span class="r3">'test'</span><span class="r1"> </span><a class="r1" href="https://example.org">Click</a>\n</pre>\n </code>\n</body>\n</html>\n'
+ expected = '<!DOCTYPE html>\n<head>\n<meta charset="UTF-8">\n<style>\n.r1 {font-weight: bold}\n.r2 {color: #ff00ff; text-decoration-color: #ff00ff; font-weight: bold}\n.r3 {color: #008000; text-decoration-color: #008000; font-weight: bold}\nbody {\n color: #000000;\n background-color: #ffffff;\n}\n</style>\n</head>\n<html>\n<body>\n <pre style="font-family:Menlo,\'DejaVu Sans Mono\',consolas,\'Courier New\',monospace">\n <code><span class="r1">foo <</span><span class="r2">script</span><span class="r1">> </span><span class="r3">'test'</span><span class="r1"> </span><a class="r1" href="https://example.org">Click</a>\n</code>\n </pre>\n</body>\n</html>\n'
assert html == expected
@@ -536,7 +536,7 @@ def test_export_html_inline():
console = Console(record=True, width=100)
console.print("[b]foo [link=https://example.org]Click[/link]")
html = console.export_html(inline_styles=True)
- expected = '<!DOCTYPE html>\n<head>\n<meta charset="UTF-8">\n<style>\n\nbody {\n color: #000000;\n background-color: #ffffff;\n}\n</style>\n</head>\n<html>\n<body>\n <code>\n <pre style="font-family:Menlo,\'DejaVu Sans Mono\',consolas,\'Courier New\',monospace"><span style="font-weight: bold">foo </span><span style="font-weight: bold"><a href="https://example.org">Click</a></span>\n</pre>\n </code>\n</body>\n</html>\n'
+ expected = '<!DOCTYPE html>\n<head>\n<meta charset="UTF-8">\n<style>\n\nbody {\n color: #000000;\n background-color: #ffffff;\n}\n</style>\n</head>\n<html>\n<body>\n <pre style="font-family:Menlo,\'DejaVu Sans Mono\',consolas,\'Courier New\',monospace">\n <code><span style="font-weight: bold">foo </span><span style="font-weight: bold"><a href="https://example.org">Click</a></span>\n</code>\n </pre>\n</body>\n</html>\n'
assert html == expected
@@ -589,7 +589,7 @@ def test_save_text():
def test_save_html():
- expected = "<!DOCTYPE html>\n<head>\n<meta charset=\"UTF-8\">\n<style>\n\nbody {\n color: #000000;\n background-color: #ffffff;\n}\n</style>\n</head>\n<html>\n<body>\n <code>\n <pre style=\"font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace\">foo\n</pre>\n </code>\n</body>\n</html>\n"
+ expected = "<!DOCTYPE html>\n<head>\n<meta charset=\"UTF-8\">\n<style>\n\nbody {\n color: #000000;\n background-color: #ffffff;\n}\n</style>\n</head>\n<html>\n<body>\n <pre style=\"font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace\">\n <code>foo\n</code>\n </pre>\n</body>\n</html>\n"
console = Console(record=True, width=100)
console.print("foo")
with tempfile.TemporaryDirectory() as path:
|
keras-team__keras-1039 | keras.utils.visualize_util
line 9: `if type(model) == Sequential:` raises `NameError: global name 'Sequential' is not defined`
line 25: `elif type(model) == Graph:` raises `NameError: global name 'Graph' is not defined`
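A minimal reproduction, as a sketch: it assumes a keras install of this vintage with `pydot` available, since `plot` constructs a `pydot.Dot` before reaching the failing comparison.

```
from keras.models import Sequential
from keras.utils.visualize_util import plot

model = Sequential()
# plot() evaluates `type(model) == Sequential` on line 9, but
# visualize_util.py never imports Sequential (or Graph), so the name
# lookup fails with the NameError quoted above.
plot(model)
```

Importing both classes at the top of `visualize_util.py` (`from keras.models import Sequential, Graph`) resolves it, which is what the patch below does.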
| [
{
"content": "import pydot\n# old pydot will not work with python3, must use one\n# that works with python3 such as pydot2 or pydot\n\n\ndef plot(model, to_file='model.png'):\n\n graph = pydot.Dot(graph_type='digraph')\n if type(model) == Sequential:\n previous_node = None\n written_nodes = []\n n = 1\n for node in model.get_config()['layers']:\n # append number in case layers have same name to differentiate\n if (node['name'] + str(n)) in written_nodes:\n n += 1\n current_node = pydot.Node(node['name'] + str(n))\n written_nodes.append(node['name'] + str(n))\n graph.add_node(current_node)\n if previous_node:\n graph.add_edge(pydot.Edge(previous_node, current_node))\n previous_node = current_node\n graph.write_png(to_file)\n\n elif type(model) == Graph:\n # don't need to append number for names since all nodes labeled\n for input_node in model.input_config:\n graph.add_node(pydot.Node(input_node['name']))\n\n # intermediate and output nodes have input defined\n for layer_config in [model.node_config, model.output_config]:\n for node in layer_config:\n graph.add_node(pydot.Node(node['name']))\n # possible to have multiple 'inputs' vs 1 'input'\n if node['inputs']:\n for e in node['inputs']:\n graph.add_edge(pydot.Edge(e, node['name']))\n else:\n graph.add_edge(pydot.Edge(node['input'], node['name']))\n\n graph.write_png(to_file)\n",
"path": "keras/utils/visualize_util.py"
}
] | [
{
"content": "import pydot\n# old pydot will not work with python3, must use one\n# that works with python3 such as pydot2 or pydot\nfrom keras.models import Sequential, Graph\n\ndef plot(model, to_file='model.png'):\n\n graph = pydot.Dot(graph_type='digraph')\n if type(model) == Sequential:\n previous_node = None\n written_nodes = []\n n = 1\n for node in model.get_config()['layers']:\n # append number in case layers have same name to differentiate\n if (node['name'] + str(n)) in written_nodes:\n n += 1\n current_node = pydot.Node(node['name'] + str(n))\n written_nodes.append(node['name'] + str(n))\n graph.add_node(current_node)\n if previous_node:\n graph.add_edge(pydot.Edge(previous_node, current_node))\n previous_node = current_node\n graph.write_png(to_file)\n\n elif type(model) == Graph:\n # don't need to append number for names since all nodes labeled\n for input_node in model.input_config:\n graph.add_node(pydot.Node(input_node['name']))\n\n # intermediate and output nodes have input defined\n for layer_config in [model.node_config, model.output_config]:\n for node in layer_config:\n graph.add_node(pydot.Node(node['name']))\n # possible to have multiple 'inputs' vs 1 'input'\n if node['inputs']:\n for e in node['inputs']:\n graph.add_edge(pydot.Edge(e, node['name']))\n else:\n graph.add_edge(pydot.Edge(node['input'], node['name']))\n\n graph.write_png(to_file)\n",
"path": "keras/utils/visualize_util.py"
}
] | diff --git a/keras/utils/visualize_util.py b/keras/utils/visualize_util.py
index f3610abd09b9..55bfd557fd4b 100644
--- a/keras/utils/visualize_util.py
+++ b/keras/utils/visualize_util.py
@@ -1,7 +1,7 @@
import pydot
# old pydot will not work with python3, must use one
# that works with python3 such as pydot2 or pydot
-
+from keras.models import Sequential, Graph
def plot(model, to_file='model.png'):
|
microsoft__botbuilder-python-1303 | Bump azure-cosmos to v3.2.0
**Is your feature request related to a problem? Please describe.**
We're currently on `azure-cosmos` v3.1.2. Not a ton of changes in 3.2.0, but it looks like it will be their last stable version, now that they're working on v4:

**Additional context**
Need to ensure all Cosmos tests are run live before merging (they're skipped by default).
[enhancement]
| [
{
"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n\nimport os\nfrom setuptools import setup\n\nREQUIRES = [\n \"azure-cosmos==3.1.2\",\n \"azure-storage-blob==2.1.0\",\n \"botbuilder-schema==4.10.0\",\n \"botframework-connector==4.10.0\",\n \"jsonpickle==1.2\",\n]\nTEST_REQUIRES = [\"aiounittest==1.3.0\"]\n\nroot = os.path.abspath(os.path.dirname(__file__))\n\nwith open(os.path.join(root, \"botbuilder\", \"azure\", \"about.py\")) as f:\n package_info = {}\n info = f.read()\n exec(info, package_info)\n\nwith open(os.path.join(root, \"README.rst\"), encoding=\"utf-8\") as f:\n long_description = f.read()\n\nsetup(\n name=package_info[\"__title__\"],\n version=package_info[\"__version__\"],\n url=package_info[\"__uri__\"],\n author=package_info[\"__author__\"],\n description=package_info[\"__description__\"],\n keywords=[\"BotBuilderAzure\", \"bots\", \"ai\", \"botframework\", \"botbuilder\", \"azure\"],\n long_description=long_description,\n long_description_content_type=\"text/x-rst\",\n license=package_info[\"__license__\"],\n packages=[\"botbuilder.azure\"],\n install_requires=REQUIRES + TEST_REQUIRES,\n tests_require=TEST_REQUIRES,\n classifiers=[\n \"Programming Language :: Python :: 3.7\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n \"Development Status :: 5 - Production/Stable\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n ],\n)\n",
"path": "libraries/botbuilder-azure/setup.py"
}
] | [
{
"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n\nimport os\nfrom setuptools import setup\n\nREQUIRES = [\n \"azure-cosmos==3.2.0\",\n \"azure-storage-blob==2.1.0\",\n \"botbuilder-schema==4.10.0\",\n \"botframework-connector==4.10.0\",\n \"jsonpickle==1.2\",\n]\nTEST_REQUIRES = [\"aiounittest==1.3.0\"]\n\nroot = os.path.abspath(os.path.dirname(__file__))\n\nwith open(os.path.join(root, \"botbuilder\", \"azure\", \"about.py\")) as f:\n package_info = {}\n info = f.read()\n exec(info, package_info)\n\nwith open(os.path.join(root, \"README.rst\"), encoding=\"utf-8\") as f:\n long_description = f.read()\n\nsetup(\n name=package_info[\"__title__\"],\n version=package_info[\"__version__\"],\n url=package_info[\"__uri__\"],\n author=package_info[\"__author__\"],\n description=package_info[\"__description__\"],\n keywords=[\"BotBuilderAzure\", \"bots\", \"ai\", \"botframework\", \"botbuilder\", \"azure\"],\n long_description=long_description,\n long_description_content_type=\"text/x-rst\",\n license=package_info[\"__license__\"],\n packages=[\"botbuilder.azure\"],\n install_requires=REQUIRES + TEST_REQUIRES,\n tests_require=TEST_REQUIRES,\n classifiers=[\n \"Programming Language :: Python :: 3.7\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n \"Development Status :: 5 - Production/Stable\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n ],\n)\n",
"path": "libraries/botbuilder-azure/setup.py"
}
] | diff --git a/libraries/botbuilder-azure/setup.py b/libraries/botbuilder-azure/setup.py
index 7b1a77c64..50ae09a60 100644
--- a/libraries/botbuilder-azure/setup.py
+++ b/libraries/botbuilder-azure/setup.py
@@ -5,7 +5,7 @@
from setuptools import setup
REQUIRES = [
- "azure-cosmos==3.1.2",
+ "azure-cosmos==3.2.0",
"azure-storage-blob==2.1.0",
"botbuilder-schema==4.10.0",
"botframework-connector==4.10.0",
|
pytorch__text-81 | min_freq=0 bug
**Noticed:**
```
>>>some_field.build_vocab(some_dataset, min_freq=0)
>>>padding_idx = some_field.vocab.stoi['<pad>']
>>>print(padding_idx, '<pad>')
12 <pad>
```
Looks like the index of `<pad>` is not equal to 1, which is not okay.
Printed `stoi` and `itos` as well:
```
>>>print(some_field.vocab.stoi)
defaultdict(<function Vocab.__init__.<locals>.<lambda> at 0x103f4f0d0>, {'<pad>': 12, '1': 2, '2': 3, '9': 4, '0': 5, '5': 6, '4': 7, '6': 8, '8': 9, '3': 10, '7': 11, '<unk>': 13})
>>>print(some_field.vocab.itos)
['<unk>', '<pad>', '1', '2', '9', '0', '5', '4', '6', '8', '3', '7', '<pad>', '<unk>']
```
**Possible reason:**
`Counter.subtract` doesn't actually remove the specials; it just sets their count to 0:
`counter.subtract({tok: counter[tok] for tok in ['<unk>'] + specials})`
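This is easy to confirm with `collections.Counter` alone; a standalone sketch mirroring what `Vocab.__init__` does to the specials:

```
from collections import Counter

specials = ['<pad>']
counter = Counter({'1': 5, '2': 3})

counter.update(['<unk>'] + specials)
counter.subtract({tok: counter[tok] for tok in ['<unk>'] + specials})

# subtract() zeroes the counts but keeps the keys:
print(counter)  # Counter({'1': 5, '2': 3, '<unk>': 0, '<pad>': 0})

# With min_freq=0 the cutoff `if v < min_freq: break` never fires for the
# zero-count specials (0 < 0 is False), so they get appended to itos/stoi
# a second time, after all the real tokens.
```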
**Possible solution:**
Throw an error if `min_freq < 1`
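Either rejecting or clamping works as a one-liner at the top of `Vocab.__init__`; clamping keeps existing call sites working, and is the route the patch below takes:

```
# Option 1: reject invalid values outright
if min_freq < 1:
    raise ValueError('min_freq must be at least 1, got {}'.format(min_freq))

# Option 2: silently clamp
min_freq = max(min_freq, 1)
```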
| [
{
"content": "from __future__ import print_function\nimport array\nfrom collections import defaultdict\nimport os\nimport zipfile\n\nimport six\nfrom six.moves.urllib.request import urlretrieve\nimport torch\nfrom tqdm import trange, tqdm\n\nfrom .utils import reporthook\n\nURL = {\n 'glove.42B': 'http://nlp.stanford.edu/data/glove.42B.300d.zip',\n 'glove.840B': 'http://nlp.stanford.edu/data/glove.840B.300d.zip',\n 'glove.twitter.27B': 'http://nlp.stanford.edu/data/glove.twitter.27B.zip',\n 'glove.6B': 'http://nlp.stanford.edu/data/glove.6B.zip'\n}\n\n\ndef load_word_vectors(root, wv_type, dim):\n \"\"\"Load word vectors from a path, trying .pt, .txt, and .zip extensions.\"\"\"\n if isinstance(dim, int):\n dim = str(dim) + 'd'\n fname = os.path.join(root, wv_type + '.' + dim)\n if os.path.isfile(fname + '.pt'):\n fname_pt = fname + '.pt'\n print('loading word vectors from', fname_pt)\n return torch.load(fname_pt)\n if os.path.isfile(fname + '.txt'):\n fname_txt = fname + '.txt'\n cm = open(fname_txt, 'rb')\n cm = [line for line in cm]\n elif os.path.basename(wv_type) in URL:\n url = URL[wv_type]\n print('downloading word vectors from {}'.format(url))\n filename = os.path.basename(fname)\n if not os.path.exists(root):\n os.makedirs(root)\n with tqdm(unit='B', unit_scale=True, miniters=1, desc=filename) as t:\n fname, _ = urlretrieve(url, fname, reporthook=reporthook(t))\n with zipfile.ZipFile(fname, \"r\") as zf:\n print('extracting word vectors into {}'.format(root))\n zf.extractall(root)\n if not os.path.isfile(fname + '.txt'):\n raise RuntimeError('no word vectors of requested dimension found')\n return load_word_vectors(root, wv_type, dim)\n else:\n raise RuntimeError('unable to load word vectors')\n\n wv_tokens, wv_arr, wv_size = [], array.array('d'), None\n if cm is not None:\n print(\"Loading word vectors from {}\".format(fname_txt))\n for line in trange(len(cm)):\n entries = cm[line].strip().split(b' ')\n word, entries = entries[0], entries[1:]\n if wv_size is None:\n wv_size = len(entries)\n try:\n if isinstance(word, six.binary_type):\n word = word.decode('utf-8')\n except:\n print('non-UTF8 token', repr(word), 'ignored')\n continue\n wv_arr.extend(float(x) for x in entries)\n wv_tokens.append(word)\n\n wv_dict = {word: i for i, word in enumerate(wv_tokens)}\n wv_arr = torch.Tensor(wv_arr).view(-1, wv_size)\n ret = (wv_dict, wv_arr, wv_size)\n torch.save(ret, fname + '.pt')\n return ret\n\n\nclass Vocab(object):\n \"\"\"Defines a vocabulary object that will be used to numericalize a field.\n\n Attributes:\n freqs: A collections.Counter object holding the frequencies of tokens\n in the data used to build the Vocab.\n stoi: A collections.defaultdict instance mapping token strings to\n numerical identifiers.\n itos: A list of token strings indexed by their numerical identifiers.\n vectors: A Tensor containing word vectors for the tokens in the Vocab,\n if a word vector file has been provided.\n \"\"\"\n\n def __init__(self, counter, max_size=None, min_freq=1, wv_dir=os.getcwd(),\n wv_type=None, wv_dim=300, unk_init='random',\n specials=['<pad>'], fill_from_vectors=False):\n \"\"\"Create a Vocab object from a collections.Counter.\n\n Arguments:\n counter: collections.Counter object holding the frequencies of\n each value found in the data.\n max_size: The maximum size of the vocabulary, or None for no\n maximum. Default: None.\n min_freq: The minimum frequency needed to include a token in the\n vocabulary. 
Default: 1.\n wv_dir: directory containing word vector file and destination for\n downloaded word vector files\n wv_type: type of word vectors; None for no word vectors\n wv_dim: dimension of word vectors\n specials: The list of special tokens (e.g., padding or eos) that\n will be prepended to the vocabulary in addition to an <unk>\n token.\n fill_from_vectors: Whether to add to the vocabulary every token\n for which a word vector specified by vectors is present\n even if the token does not appear in the provided data.\n unk_init: default to random initialization for word vectors not in the\n pretrained word vector file; otherwise set to zero\n \"\"\"\n self.freqs = counter.copy()\n self.unk_init = unk_init\n counter.update(['<unk>'] + specials)\n\n if wv_type is not None:\n wv_dict, wv_arr, self.wv_size = load_word_vectors(wv_dir, wv_type, wv_dim)\n\n if fill_from_vectors:\n counter.update(wv_dict.keys())\n\n self.stoi = defaultdict(lambda: 0)\n self.stoi.update({tok: i + 1 for i, tok in enumerate(specials)})\n self.itos = ['<unk>'] + specials\n\n counter.subtract({tok: counter[tok] for tok in ['<unk>'] + specials})\n max_size = None if max_size is None else max_size + len(self.itos)\n\n # sort by frequency, then alphabetically\n words = sorted(counter.items(), key=lambda tup: tup[0])\n words.sort(key=lambda tup: tup[1], reverse=True)\n\n for k, v in words:\n if v < min_freq or len(self.itos) == max_size:\n break\n self.itos.append(k)\n self.stoi[k] = len(self.itos) - 1\n\n if wv_type is not None:\n self.set_vectors(wv_dict, wv_arr)\n\n def __len__(self):\n return len(self.itos)\n\n def load_vectors(self, wv_dir=os.getcwd(), wv_type=None, wv_dim=300,\n unk_init='random'):\n \"\"\"Loads word vectors into the vocab\n\n Arguments:\n wv_dir: directory containing word vector file and destination for\n downloaded word vector files\n wv_type: type of word vectors; None for no word vectors\n wv_dim: dimension of word vectors\n\n unk_init: default to random initialization for unknown word vectors;\n otherwise set to zero\n \"\"\"\n self.unk_init = unk_init\n wv_dict, wv_arr, self.wv_size = load_word_vectors(wv_dir, wv_type, wv_dim)\n self.set_vectors(wv_dict, wv_arr)\n\n def set_vectors(self, wv_dict, wv_arr):\n self.vectors = torch.Tensor(len(self), self.wv_size)\n self.vectors.normal_(0, 1) if self.unk_init == 'random' else self.vectors.zero_()\n for i, token in enumerate(self.itos):\n wv_index = wv_dict.get(token, None)\n if wv_index is not None:\n self.vectors[i] = wv_arr[wv_index]\n",
"path": "torchtext/vocab.py"
}
] | [
{
"content": "from __future__ import print_function\nimport array\nfrom collections import defaultdict\nimport os\nimport zipfile\n\nimport six\nfrom six.moves.urllib.request import urlretrieve\nimport torch\nfrom tqdm import trange, tqdm\n\nfrom .utils import reporthook\n\nURL = {\n 'glove.42B': 'http://nlp.stanford.edu/data/glove.42B.300d.zip',\n 'glove.840B': 'http://nlp.stanford.edu/data/glove.840B.300d.zip',\n 'glove.twitter.27B': 'http://nlp.stanford.edu/data/glove.twitter.27B.zip',\n 'glove.6B': 'http://nlp.stanford.edu/data/glove.6B.zip'\n}\n\n\ndef load_word_vectors(root, wv_type, dim):\n \"\"\"Load word vectors from a path, trying .pt, .txt, and .zip extensions.\"\"\"\n if isinstance(dim, int):\n dim = str(dim) + 'd'\n fname = os.path.join(root, wv_type + '.' + dim)\n if os.path.isfile(fname + '.pt'):\n fname_pt = fname + '.pt'\n print('loading word vectors from', fname_pt)\n return torch.load(fname_pt)\n if os.path.isfile(fname + '.txt'):\n fname_txt = fname + '.txt'\n cm = open(fname_txt, 'rb')\n cm = [line for line in cm]\n elif os.path.basename(wv_type) in URL:\n url = URL[wv_type]\n print('downloading word vectors from {}'.format(url))\n filename = os.path.basename(fname)\n if not os.path.exists(root):\n os.makedirs(root)\n with tqdm(unit='B', unit_scale=True, miniters=1, desc=filename) as t:\n fname, _ = urlretrieve(url, fname, reporthook=reporthook(t))\n with zipfile.ZipFile(fname, \"r\") as zf:\n print('extracting word vectors into {}'.format(root))\n zf.extractall(root)\n if not os.path.isfile(fname + '.txt'):\n raise RuntimeError('no word vectors of requested dimension found')\n return load_word_vectors(root, wv_type, dim)\n else:\n raise RuntimeError('unable to load word vectors')\n\n wv_tokens, wv_arr, wv_size = [], array.array('d'), None\n if cm is not None:\n print(\"Loading word vectors from {}\".format(fname_txt))\n for line in trange(len(cm)):\n entries = cm[line].strip().split(b' ')\n word, entries = entries[0], entries[1:]\n if wv_size is None:\n wv_size = len(entries)\n try:\n if isinstance(word, six.binary_type):\n word = word.decode('utf-8')\n except:\n print('non-UTF8 token', repr(word), 'ignored')\n continue\n wv_arr.extend(float(x) for x in entries)\n wv_tokens.append(word)\n\n wv_dict = {word: i for i, word in enumerate(wv_tokens)}\n wv_arr = torch.Tensor(wv_arr).view(-1, wv_size)\n ret = (wv_dict, wv_arr, wv_size)\n torch.save(ret, fname + '.pt')\n return ret\n\n\nclass Vocab(object):\n \"\"\"Defines a vocabulary object that will be used to numericalize a field.\n\n Attributes:\n freqs: A collections.Counter object holding the frequencies of tokens\n in the data used to build the Vocab.\n stoi: A collections.defaultdict instance mapping token strings to\n numerical identifiers.\n itos: A list of token strings indexed by their numerical identifiers.\n vectors: A Tensor containing word vectors for the tokens in the Vocab,\n if a word vector file has been provided.\n \"\"\"\n\n def __init__(self, counter, max_size=None, min_freq=1, wv_dir=os.getcwd(),\n wv_type=None, wv_dim=300, unk_init='random',\n specials=['<pad>'], fill_from_vectors=False):\n \"\"\"Create a Vocab object from a collections.Counter.\n\n Arguments:\n counter: collections.Counter object holding the frequencies of\n each value found in the data.\n max_size: The maximum size of the vocabulary, or None for no\n maximum. Default: None.\n min_freq: The minimum frequency needed to include a token in the\n vocabulary. 
Default: 1.\n wv_dir: directory containing word vector file and destination for\n downloaded word vector files\n wv_type: type of word vectors; None for no word vectors\n wv_dim: dimension of word vectors\n specials: The list of special tokens (e.g., padding or eos) that\n will be prepended to the vocabulary in addition to an <unk>\n token.\n fill_from_vectors: Whether to add to the vocabulary every token\n for which a word vector specified by vectors is present\n even if the token does not appear in the provided data.\n unk_init: default to random initialization for word vectors not in the\n pretrained word vector file; otherwise set to zero\n \"\"\"\n self.freqs = counter.copy()\n self.unk_init = unk_init\n min_freq = max(min_freq, 1)\n counter.update(['<unk>'] + specials)\n\n if wv_type is not None:\n wv_dict, wv_arr, self.wv_size = load_word_vectors(wv_dir, wv_type, wv_dim)\n\n if fill_from_vectors:\n counter.update(wv_dict.keys())\n\n self.stoi = defaultdict(lambda: 0)\n self.stoi.update({tok: i + 1 for i, tok in enumerate(specials)})\n self.itos = ['<unk>'] + specials\n\n counter.subtract({tok: counter[tok] for tok in ['<unk>'] + specials})\n max_size = None if max_size is None else max_size + len(self.itos)\n\n # sort by frequency, then alphabetically\n words = sorted(counter.items(), key=lambda tup: tup[0])\n words.sort(key=lambda tup: tup[1], reverse=True)\n\n for k, v in words:\n if v < min_freq or len(self.itos) == max_size:\n break\n self.itos.append(k)\n self.stoi[k] = len(self.itos) - 1\n\n if wv_type is not None:\n self.set_vectors(wv_dict, wv_arr)\n\n def __len__(self):\n return len(self.itos)\n\n def load_vectors(self, wv_dir=os.getcwd(), wv_type=None, wv_dim=300,\n unk_init='random'):\n \"\"\"Loads word vectors into the vocab\n\n Arguments:\n wv_dir: directory containing word vector file and destination for\n downloaded word vector files\n wv_type: type of word vectors; None for no word vectors\n wv_dim: dimension of word vectors\n\n unk_init: default to random initialization for unknown word vectors;\n otherwise set to zero\n \"\"\"\n self.unk_init = unk_init\n wv_dict, wv_arr, self.wv_size = load_word_vectors(wv_dir, wv_type, wv_dim)\n self.set_vectors(wv_dict, wv_arr)\n\n def set_vectors(self, wv_dict, wv_arr):\n self.vectors = torch.Tensor(len(self), self.wv_size)\n self.vectors.normal_(0, 1) if self.unk_init == 'random' else self.vectors.zero_()\n for i, token in enumerate(self.itos):\n wv_index = wv_dict.get(token, None)\n if wv_index is not None:\n self.vectors[i] = wv_arr[wv_index]\n",
"path": "torchtext/vocab.py"
}
] | diff --git a/torchtext/vocab.py b/torchtext/vocab.py
index 3f1e52015f..67153dccbf 100644
--- a/torchtext/vocab.py
+++ b/torchtext/vocab.py
@@ -113,6 +113,7 @@ def __init__(self, counter, max_size=None, min_freq=1, wv_dir=os.getcwd(),
"""
self.freqs = counter.copy()
self.unk_init = unk_init
+ min_freq = max(min_freq, 1)
counter.update(['<unk>'] + specials)
if wv_type is not None:
|
matrix-org__synapse-7630 | Update SSO UIAuth login identifier to m.login.sso
I'm not sure when exactly we do this, but [MSC2454](https://github.com/matrix-org/matrix-doc/pull/2454) was merged, which identifies `m.login.sso` as the identifier for SSO + UIAuth. Synapse is currently using `org.matrix.login.sso`. At some point we should switch to the standardized version.
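Concretely, the switch is a one-line change to the `LoginType` constants in `synapse/api/constants.py` (shown in full in the diff below):

```
class LoginType(object):
    # ... other login types unchanged ...
    SSO = "m.login.sso"  # previously "org.matrix.login.sso"
```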
| [
{
"content": "# -*- coding: utf-8 -*-\n# Copyright 2014-2016 OpenMarket Ltd\n# Copyright 2017 Vector Creations Ltd\n# Copyright 2018-2019 New Vector Ltd\n# Copyright 2019 The Matrix.org Foundation C.I.C.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Contains constants from the specification.\"\"\"\n\n# the \"depth\" field on events is limited to 2**63 - 1\nMAX_DEPTH = 2 ** 63 - 1\n\n# the maximum length for a room alias is 255 characters\nMAX_ALIAS_LENGTH = 255\n\n# the maximum length for a user id is 255 characters\nMAX_USERID_LENGTH = 255\n\n\nclass Membership(object):\n\n \"\"\"Represents the membership states of a user in a room.\"\"\"\n\n INVITE = \"invite\"\n JOIN = \"join\"\n KNOCK = \"knock\"\n LEAVE = \"leave\"\n BAN = \"ban\"\n LIST = (INVITE, JOIN, KNOCK, LEAVE, BAN)\n\n\nclass PresenceState(object):\n \"\"\"Represents the presence state of a user.\"\"\"\n\n OFFLINE = \"offline\"\n UNAVAILABLE = \"unavailable\"\n ONLINE = \"online\"\n\n\nclass JoinRules(object):\n PUBLIC = \"public\"\n KNOCK = \"knock\"\n INVITE = \"invite\"\n PRIVATE = \"private\"\n\n\nclass LoginType(object):\n PASSWORD = \"m.login.password\"\n EMAIL_IDENTITY = \"m.login.email.identity\"\n MSISDN = \"m.login.msisdn\"\n RECAPTCHA = \"m.login.recaptcha\"\n TERMS = \"m.login.terms\"\n SSO = \"org.matrix.login.sso\"\n DUMMY = \"m.login.dummy\"\n\n # Only for C/S API v1\n APPLICATION_SERVICE = \"m.login.application_service\"\n SHARED_SECRET = \"org.matrix.login.shared_secret\"\n\n\nclass EventTypes(object):\n Member = \"m.room.member\"\n Create = \"m.room.create\"\n Tombstone = \"m.room.tombstone\"\n JoinRules = \"m.room.join_rules\"\n PowerLevels = \"m.room.power_levels\"\n Aliases = \"m.room.aliases\"\n Redaction = \"m.room.redaction\"\n ThirdPartyInvite = \"m.room.third_party_invite\"\n RelatedGroups = \"m.room.related_groups\"\n\n RoomHistoryVisibility = \"m.room.history_visibility\"\n CanonicalAlias = \"m.room.canonical_alias\"\n Encrypted = \"m.room.encrypted\"\n RoomAvatar = \"m.room.avatar\"\n RoomEncryption = \"m.room.encryption\"\n GuestAccess = \"m.room.guest_access\"\n\n # These are used for validation\n Message = \"m.room.message\"\n Topic = \"m.room.topic\"\n Name = \"m.room.name\"\n\n ServerACL = \"m.room.server_acl\"\n Pinned = \"m.room.pinned_events\"\n\n Retention = \"m.room.retention\"\n\n Presence = \"m.presence\"\n\n\nclass RejectedReason(object):\n AUTH_ERROR = \"auth_error\"\n\n\nclass RoomCreationPreset(object):\n PRIVATE_CHAT = \"private_chat\"\n PUBLIC_CHAT = \"public_chat\"\n TRUSTED_PRIVATE_CHAT = \"trusted_private_chat\"\n\n\nclass ThirdPartyEntityKind(object):\n USER = \"user\"\n LOCATION = \"location\"\n\n\nServerNoticeMsgType = \"m.server_notice\"\nServerNoticeLimitReached = \"m.server_notice.usage_limit_reached\"\n\n\nclass UserTypes(object):\n \"\"\"Allows for user type specific behaviour. With the benefit of hindsight\n 'admin' and 'guest' users should also be UserTypes. 
Normal users are type None\n \"\"\"\n\n SUPPORT = \"support\"\n BOT = \"bot\"\n ALL_USER_TYPES = (SUPPORT, BOT)\n\n\nclass RelationTypes(object):\n \"\"\"The types of relations known to this server.\n \"\"\"\n\n ANNOTATION = \"m.annotation\"\n REPLACE = \"m.replace\"\n REFERENCE = \"m.reference\"\n\n\nclass LimitBlockingTypes(object):\n \"\"\"Reasons that a server may be blocked\"\"\"\n\n MONTHLY_ACTIVE_USER = \"monthly_active_user\"\n HS_DISABLED = \"hs_disabled\"\n\n\nclass EventContentFields(object):\n \"\"\"Fields found in events' content, regardless of type.\"\"\"\n\n # Labels for the event, cf https://github.com/matrix-org/matrix-doc/pull/2326\n LABELS = \"org.matrix.labels\"\n\n # Timestamp to delete the event after\n # cf https://github.com/matrix-org/matrix-doc/pull/2228\n SELF_DESTRUCT_AFTER = \"org.matrix.self_destruct_after\"\n",
"path": "synapse/api/constants.py"
}
] | [
{
"content": "# -*- coding: utf-8 -*-\n# Copyright 2014-2016 OpenMarket Ltd\n# Copyright 2017 Vector Creations Ltd\n# Copyright 2018-2019 New Vector Ltd\n# Copyright 2019 The Matrix.org Foundation C.I.C.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Contains constants from the specification.\"\"\"\n\n# the \"depth\" field on events is limited to 2**63 - 1\nMAX_DEPTH = 2 ** 63 - 1\n\n# the maximum length for a room alias is 255 characters\nMAX_ALIAS_LENGTH = 255\n\n# the maximum length for a user id is 255 characters\nMAX_USERID_LENGTH = 255\n\n\nclass Membership(object):\n\n \"\"\"Represents the membership states of a user in a room.\"\"\"\n\n INVITE = \"invite\"\n JOIN = \"join\"\n KNOCK = \"knock\"\n LEAVE = \"leave\"\n BAN = \"ban\"\n LIST = (INVITE, JOIN, KNOCK, LEAVE, BAN)\n\n\nclass PresenceState(object):\n \"\"\"Represents the presence state of a user.\"\"\"\n\n OFFLINE = \"offline\"\n UNAVAILABLE = \"unavailable\"\n ONLINE = \"online\"\n\n\nclass JoinRules(object):\n PUBLIC = \"public\"\n KNOCK = \"knock\"\n INVITE = \"invite\"\n PRIVATE = \"private\"\n\n\nclass LoginType(object):\n PASSWORD = \"m.login.password\"\n EMAIL_IDENTITY = \"m.login.email.identity\"\n MSISDN = \"m.login.msisdn\"\n RECAPTCHA = \"m.login.recaptcha\"\n TERMS = \"m.login.terms\"\n SSO = \"m.login.sso\"\n DUMMY = \"m.login.dummy\"\n\n # Only for C/S API v1\n APPLICATION_SERVICE = \"m.login.application_service\"\n SHARED_SECRET = \"org.matrix.login.shared_secret\"\n\n\nclass EventTypes(object):\n Member = \"m.room.member\"\n Create = \"m.room.create\"\n Tombstone = \"m.room.tombstone\"\n JoinRules = \"m.room.join_rules\"\n PowerLevels = \"m.room.power_levels\"\n Aliases = \"m.room.aliases\"\n Redaction = \"m.room.redaction\"\n ThirdPartyInvite = \"m.room.third_party_invite\"\n RelatedGroups = \"m.room.related_groups\"\n\n RoomHistoryVisibility = \"m.room.history_visibility\"\n CanonicalAlias = \"m.room.canonical_alias\"\n Encrypted = \"m.room.encrypted\"\n RoomAvatar = \"m.room.avatar\"\n RoomEncryption = \"m.room.encryption\"\n GuestAccess = \"m.room.guest_access\"\n\n # These are used for validation\n Message = \"m.room.message\"\n Topic = \"m.room.topic\"\n Name = \"m.room.name\"\n\n ServerACL = \"m.room.server_acl\"\n Pinned = \"m.room.pinned_events\"\n\n Retention = \"m.room.retention\"\n\n Presence = \"m.presence\"\n\n\nclass RejectedReason(object):\n AUTH_ERROR = \"auth_error\"\n\n\nclass RoomCreationPreset(object):\n PRIVATE_CHAT = \"private_chat\"\n PUBLIC_CHAT = \"public_chat\"\n TRUSTED_PRIVATE_CHAT = \"trusted_private_chat\"\n\n\nclass ThirdPartyEntityKind(object):\n USER = \"user\"\n LOCATION = \"location\"\n\n\nServerNoticeMsgType = \"m.server_notice\"\nServerNoticeLimitReached = \"m.server_notice.usage_limit_reached\"\n\n\nclass UserTypes(object):\n \"\"\"Allows for user type specific behaviour. With the benefit of hindsight\n 'admin' and 'guest' users should also be UserTypes. 
Normal users are type None\n \"\"\"\n\n SUPPORT = \"support\"\n BOT = \"bot\"\n ALL_USER_TYPES = (SUPPORT, BOT)\n\n\nclass RelationTypes(object):\n \"\"\"The types of relations known to this server.\n \"\"\"\n\n ANNOTATION = \"m.annotation\"\n REPLACE = \"m.replace\"\n REFERENCE = \"m.reference\"\n\n\nclass LimitBlockingTypes(object):\n \"\"\"Reasons that a server may be blocked\"\"\"\n\n MONTHLY_ACTIVE_USER = \"monthly_active_user\"\n HS_DISABLED = \"hs_disabled\"\n\n\nclass EventContentFields(object):\n \"\"\"Fields found in events' content, regardless of type.\"\"\"\n\n # Labels for the event, cf https://github.com/matrix-org/matrix-doc/pull/2326\n LABELS = \"org.matrix.labels\"\n\n # Timestamp to delete the event after\n # cf https://github.com/matrix-org/matrix-doc/pull/2228\n SELF_DESTRUCT_AFTER = \"org.matrix.self_destruct_after\"\n",
"path": "synapse/api/constants.py"
}
] | diff --git a/changelog.d/7630.feature b/changelog.d/7630.feature
new file mode 100644
index 000000000000..cce31fc881de
--- /dev/null
+++ b/changelog.d/7630.feature
@@ -0,0 +1 @@
+Support the standardized `m.login.sso` user-interactive authentication flow.
diff --git a/synapse/api/constants.py b/synapse/api/constants.py
index bcaf2c3600e4..b55fecea50b9 100644
--- a/synapse/api/constants.py
+++ b/synapse/api/constants.py
@@ -61,7 +61,7 @@ class LoginType(object):
MSISDN = "m.login.msisdn"
RECAPTCHA = "m.login.recaptcha"
TERMS = "m.login.terms"
- SSO = "org.matrix.login.sso"
+ SSO = "m.login.sso"
DUMMY = "m.login.dummy"
# Only for C/S API v1
|
frappe__frappe-24160 | bug: user_info for Administrator getting set twice
## Description of the issue
https://github.com/frappe/frappe/blob/730e906dfdf0b6658a9810edf45808174922c391/frappe/boot.py#L267-L275
When logged in as Administrator, the user info in `frappe.boot.user_info` on the frontend is set twice for the Administrator user: once under the key "Administrator" and again under the Administrator's email, e.g. `[email protected]`. Consequently, if another user shares the same email address that the Administrator has in their User record, the user info for that specific user isn't added to `frappe.boot.user_info`, because that key already exists from the earlier Administrator entry.
As a result, if a document record is created by that user with the matching email, the document falsely shows "Administrator" as its creator or modifier when the Administrator views it, even though it was actually created or modified by the other user. Note that this bug only affects the Administrator's view; every other user sees the document correctly attributed to the user who actually performed those actions.
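A minimal sketch of the collision, using plain dicts and a hypothetical shared email (`[email protected]` and "Jane Doe" are made up); the helper below is a stand-in for Frappe's `add_user_info`, which, as described above, skips keys that already exist:

```
user_info = {}

# get_user_info() registers the Administrator twice: once by name and,
# because of the linked boot.py lines, once more under its email.
user_info["Administrator"] = {"fullname": "Administrator"}
user_info["[email protected]"] = user_info["Administrator"]

def add_user_info(user, info):
    # stand-in: only add the entry if the key is not already present
    info.setdefault(user, {"fullname": "Jane Doe"})

# Another user whose User record carries the same email never gets an
# entry of their own -- the Administrator's entry shadows it.
add_user_info("[email protected]", user_info)
print(user_info["[email protected]"]["fullname"])  # "Administrator", not "Jane Doe"
```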
@surajshetty3416, can you please look into this.
### Observed result
When Administrator is viewing the document record:

When other users are viewing the same document record:

### Additional information
Frappe Version: v14.45.0
| [
{
"content": "# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors\n# License: MIT. See LICENSE\n\"\"\"\nbootstrap client session\n\"\"\"\n\nimport frappe\nimport frappe.defaults\nimport frappe.desk.desk_page\nfrom frappe.core.doctype.navbar_settings.navbar_settings import get_app_logo, get_navbar_settings\nfrom frappe.desk.doctype.form_tour.form_tour import get_onboarding_ui_tours\nfrom frappe.desk.doctype.route_history.route_history import frequently_visited_links\nfrom frappe.desk.form.load import get_meta_bundle\nfrom frappe.email.inbox import get_email_accounts\nfrom frappe.model.base_document import get_controller\nfrom frappe.permissions import has_permission\nfrom frappe.query_builder import DocType\nfrom frappe.query_builder.functions import Count\nfrom frappe.query_builder.terms import ParameterizedValueWrapper, SubQuery\nfrom frappe.social.doctype.energy_point_log.energy_point_log import get_energy_points\nfrom frappe.social.doctype.energy_point_settings.energy_point_settings import (\n\tis_energy_point_enabled,\n)\nfrom frappe.utils import add_user_info, cstr, get_system_timezone\nfrom frappe.utils.change_log import get_versions\nfrom frappe.website.doctype.web_page_view.web_page_view import is_tracking_enabled\n\n\ndef get_bootinfo():\n\t\"\"\"build and return boot info\"\"\"\n\tfrom frappe.translate import get_lang_dict, get_translated_doctypes\n\n\tfrappe.set_user_lang(frappe.session.user)\n\tbootinfo = frappe._dict()\n\thooks = frappe.get_hooks()\n\tdoclist = []\n\n\t# user\n\tget_user(bootinfo)\n\n\t# system info\n\tbootinfo.sitename = frappe.local.site\n\tbootinfo.sysdefaults = frappe.defaults.get_defaults()\n\tbootinfo.server_date = frappe.utils.nowdate()\n\n\tif frappe.session[\"user\"] != \"Guest\":\n\t\tbootinfo.user_info = get_user_info()\n\t\tbootinfo.sid = frappe.session[\"sid\"]\n\n\tbootinfo.modules = {}\n\tbootinfo.module_list = []\n\tload_desktop_data(bootinfo)\n\tbootinfo.letter_heads = get_letter_heads()\n\tbootinfo.active_domains = frappe.get_active_domains()\n\tbootinfo.all_domains = [d.get(\"name\") for d in frappe.get_all(\"Domain\")]\n\tadd_layouts(bootinfo)\n\n\tbootinfo.module_app = frappe.local.module_app\n\tbootinfo.single_types = [d.name for d in frappe.get_all(\"DocType\", {\"issingle\": 1})]\n\tbootinfo.nested_set_doctypes = [\n\t\td.parent for d in frappe.get_all(\"DocField\", {\"fieldname\": \"lft\"}, [\"parent\"])\n\t]\n\tadd_home_page(bootinfo, doclist)\n\tbootinfo.page_info = get_allowed_pages()\n\tload_translations(bootinfo)\n\tadd_timezone_info(bootinfo)\n\tload_conf_settings(bootinfo)\n\tload_print(bootinfo, doclist)\n\tdoclist.extend(get_meta_bundle(\"Page\"))\n\tbootinfo.home_folder = frappe.db.get_value(\"File\", {\"is_home_folder\": 1})\n\tbootinfo.navbar_settings = get_navbar_settings()\n\tbootinfo.notification_settings = get_notification_settings()\n\tbootinfo.onboarding_tours = get_onboarding_ui_tours()\n\tset_time_zone(bootinfo)\n\n\t# ipinfo\n\tif frappe.session.data.get(\"ipinfo\"):\n\t\tbootinfo.ipinfo = frappe.session[\"data\"][\"ipinfo\"]\n\n\t# add docs\n\tbootinfo.docs = doclist\n\tload_country_doc(bootinfo)\n\tload_currency_docs(bootinfo)\n\n\tfor method in hooks.boot_session or []:\n\t\tfrappe.get_attr(method)(bootinfo)\n\n\tif bootinfo.lang:\n\t\tbootinfo.lang = str(bootinfo.lang)\n\tbootinfo.versions = {k: v[\"version\"] for k, v in get_versions().items()}\n\n\tbootinfo.error_report_email = frappe.conf.error_report_email\n\tbootinfo.calendars = sorted(frappe.get_hooks(\"calendars\"))\n\tbootinfo.treeviews = 
frappe.get_hooks(\"treeviews\") or []\n\tbootinfo.lang_dict = get_lang_dict()\n\tbootinfo.success_action = get_success_action()\n\tbootinfo.update(get_email_accounts(user=frappe.session.user))\n\tbootinfo.energy_points_enabled = is_energy_point_enabled()\n\tbootinfo.website_tracking_enabled = is_tracking_enabled()\n\tbootinfo.points = get_energy_points(frappe.session.user)\n\tbootinfo.frequently_visited_links = frequently_visited_links()\n\tbootinfo.link_preview_doctypes = get_link_preview_doctypes()\n\tbootinfo.additional_filters_config = get_additional_filters_from_hooks()\n\tbootinfo.desk_settings = get_desk_settings()\n\tbootinfo.app_logo_url = get_app_logo()\n\tbootinfo.link_title_doctypes = get_link_title_doctypes()\n\tbootinfo.translated_doctypes = get_translated_doctypes()\n\tbootinfo.subscription_conf = add_subscription_conf()\n\tbootinfo.marketplace_apps = get_marketplace_apps()\n\n\treturn bootinfo\n\n\ndef get_letter_heads():\n\tletter_heads = {}\n\tfor letter_head in frappe.get_all(\"Letter Head\", fields=[\"name\", \"content\", \"footer\"]):\n\t\tletter_heads.setdefault(\n\t\t\tletter_head.name, {\"header\": letter_head.content, \"footer\": letter_head.footer}\n\t\t)\n\n\treturn letter_heads\n\n\ndef load_conf_settings(bootinfo):\n\tfrom frappe.core.api.file import get_max_file_size\n\n\tbootinfo.max_file_size = get_max_file_size()\n\tfor key in (\"developer_mode\", \"socketio_port\", \"file_watcher_port\"):\n\t\tif key in frappe.conf:\n\t\t\tbootinfo[key] = frappe.conf.get(key)\n\n\ndef load_desktop_data(bootinfo):\n\tfrom frappe.desk.desktop import get_workspace_sidebar_items\n\n\tbootinfo.allowed_workspaces = get_workspace_sidebar_items().get(\"pages\")\n\tbootinfo.module_wise_workspaces = get_controller(\"Workspace\").get_module_wise_workspaces()\n\tbootinfo.dashboards = frappe.get_all(\"Dashboard\")\n\n\ndef get_allowed_pages(cache=False):\n\treturn get_user_pages_or_reports(\"Page\", cache=cache)\n\n\ndef get_allowed_reports(cache=False):\n\treturn get_user_pages_or_reports(\"Report\", cache=cache)\n\n\ndef get_allowed_report_names(cache=False) -> set[str]:\n\treturn {cstr(report) for report in get_allowed_reports(cache).keys() if report}\n\n\ndef get_user_pages_or_reports(parent, cache=False):\n\tif cache:\n\t\thas_role = frappe.cache.get_value(\"has_role:\" + parent, user=frappe.session.user)\n\t\tif has_role:\n\t\t\treturn has_role\n\n\troles = frappe.get_roles()\n\thas_role = {}\n\n\tpage = DocType(\"Page\")\n\treport = DocType(\"Report\")\n\n\tif parent == \"Report\":\n\t\tcolumns = (report.name.as_(\"title\"), report.ref_doctype, report.report_type)\n\telse:\n\t\tcolumns = (page.title.as_(\"title\"),)\n\n\tcustomRole = DocType(\"Custom Role\")\n\thasRole = DocType(\"Has Role\")\n\tparentTable = DocType(parent)\n\n\t# get pages or reports set on custom role\n\tpages_with_custom_roles = (\n\t\tfrappe.qb.from_(customRole)\n\t\t.from_(hasRole)\n\t\t.from_(parentTable)\n\t\t.select(\n\t\t\tcustomRole[parent.lower()].as_(\"name\"), customRole.modified, customRole.ref_doctype, *columns\n\t\t)\n\t\t.where(\n\t\t\t(hasRole.parent == customRole.name)\n\t\t\t& (parentTable.name == customRole[parent.lower()])\n\t\t\t& (customRole[parent.lower()].isnotnull())\n\t\t\t& (hasRole.role.isin(roles))\n\t\t)\n\t).run(as_dict=True)\n\n\tfor p in pages_with_custom_roles:\n\t\thas_role[p.name] = {\"modified\": p.modified, \"title\": p.title, \"ref_doctype\": p.ref_doctype}\n\n\tsubq = 
(\n\t\tfrappe.qb.from_(customRole)\n\t\t.select(customRole[parent.lower()])\n\t\t.where(customRole[parent.lower()].isnotnull())\n\t)\n\n\tpages_with_standard_roles = (\n\t\tfrappe.qb.from_(hasRole)\n\t\t.from_(parentTable)\n\t\t.select(parentTable.name.as_(\"name\"), parentTable.modified, *columns)\n\t\t.where(\n\t\t\t(hasRole.role.isin(roles))\n\t\t\t& (hasRole.parent == parentTable.name)\n\t\t\t& (parentTable.name.notin(subq))\n\t\t)\n\t\t.distinct()\n\t)\n\n\tif parent == \"Report\":\n\t\tpages_with_standard_roles = pages_with_standard_roles.where(report.disabled == 0)\n\n\tpages_with_standard_roles = pages_with_standard_roles.run(as_dict=True)\n\n\tfor p in pages_with_standard_roles:\n\t\tif p.name not in has_role:\n\t\t\thas_role[p.name] = {\"modified\": p.modified, \"title\": p.title}\n\t\t\tif parent == \"Report\":\n\t\t\t\thas_role[p.name].update({\"ref_doctype\": p.ref_doctype})\n\n\tno_of_roles = SubQuery(\n\t\tfrappe.qb.from_(hasRole).select(Count(\"*\")).where(hasRole.parent == parentTable.name)\n\t)\n\n\t# pages with no role are allowed\n\tif parent == \"Page\":\n\n\t\tpages_with_no_roles = (\n\t\t\tfrappe.qb.from_(parentTable)\n\t\t\t.select(parentTable.name, parentTable.modified, *columns)\n\t\t\t.where(no_of_roles == 0)\n\t\t).run(as_dict=True)\n\n\t\tfor p in pages_with_no_roles:\n\t\t\tif p.name not in has_role:\n\t\t\t\thas_role[p.name] = {\"modified\": p.modified, \"title\": p.title}\n\n\telif parent == \"Report\":\n\t\tif not has_permission(\"Report\", raise_exception=False):\n\t\t\treturn {}\n\n\t\treports = frappe.get_list(\n\t\t\t\"Report\",\n\t\t\tfields=[\"name\", \"report_type\"],\n\t\t\tfilters={\"name\": (\"in\", has_role.keys())},\n\t\t\tignore_ifnull=True,\n\t\t)\n\t\tfor report in reports:\n\t\t\thas_role[report.name][\"report_type\"] = report.report_type\n\n\t\tnon_permitted_reports = set(has_role.keys()) - {r.name for r in reports}\n\t\tfor r in non_permitted_reports:\n\t\t\thas_role.pop(r, None)\n\n\t# Expire every six hours\n\tfrappe.cache.set_value(\"has_role:\" + parent, has_role, frappe.session.user, 21600)\n\treturn has_role\n\n\ndef load_translations(bootinfo):\n\tfrom frappe.translate import get_messages_for_boot\n\n\tbootinfo[\"lang\"] = frappe.lang\n\tbootinfo[\"__messages\"] = get_messages_for_boot()\n\n\ndef get_user_info():\n\t# get info for current user\n\tuser_info = frappe._dict()\n\tadd_user_info(frappe.session.user, user_info)\n\n\tif frappe.session.user == \"Administrator\" and user_info.Administrator.email:\n\t\tuser_info[user_info.Administrator.email] = user_info.Administrator\n\n\treturn user_info\n\n\ndef get_user(bootinfo):\n\t\"\"\"get user info\"\"\"\n\tbootinfo.user = frappe.get_user().load_user()\n\n\ndef add_home_page(bootinfo, docs):\n\t\"\"\"load home page\"\"\"\n\tif frappe.session.user == \"Guest\":\n\t\treturn\n\thome_page = frappe.db.get_default(\"desktop:home_page\")\n\n\tif home_page == \"setup-wizard\":\n\t\tbootinfo.setup_wizard_requires = frappe.get_hooks(\"setup_wizard_requires\")\n\n\ttry:\n\t\tpage = frappe.desk.desk_page.get(home_page)\n\t\tdocs.append(page)\n\t\tbootinfo[\"home_page\"] = page.name\n\texcept (frappe.DoesNotExistError, frappe.PermissionError):\n\t\tfrappe.clear_last_message()\n\t\tbootinfo[\"home_page\"] = \"Workspaces\"\n\n\ndef add_timezone_info(bootinfo):\n\tsystem = bootinfo.sysdefaults.get(\"time_zone\")\n\timport frappe.utils.momentjs\n\n\tbootinfo.timezone_info = {\"zones\": {}, \"rules\": {}, \"links\": {}}\n\tfrappe.utils.momentjs.update(system, bootinfo.timezone_info)\n\n\ndef 
load_print(bootinfo, doclist):\n\tprint_settings = frappe.db.get_singles_dict(\"Print Settings\")\n\tprint_settings.doctype = \":Print Settings\"\n\tdoclist.append(print_settings)\n\tload_print_css(bootinfo, print_settings)\n\n\ndef load_print_css(bootinfo, print_settings):\n\timport frappe.www.printview\n\n\tbootinfo.print_css = frappe.www.printview.get_print_style(\n\t\tprint_settings.print_style or \"Redesign\", for_legacy=True\n\t)\n\n\ndef get_unseen_notes():\n\tnote = DocType(\"Note\")\n\tnsb = DocType(\"Note Seen By\").as_(\"nsb\")\n\n\treturn (\n\t\tfrappe.qb.from_(note)\n\t\t.select(note.name, note.title, note.content, note.notify_on_every_login)\n\t\t.where(\n\t\t\t(note.notify_on_login == 1)\n\t\t\t& (note.expire_notification_on > frappe.utils.now())\n\t\t\t& (\n\t\t\t\tParameterizedValueWrapper(frappe.session.user).notin(\n\t\t\t\t\tSubQuery(frappe.qb.from_(nsb).select(nsb.user).where(nsb.parent == note.name))\n\t\t\t\t)\n\t\t\t)\n\t\t)\n\t).run(as_dict=1)\n\n\ndef get_success_action():\n\treturn frappe.get_all(\"Success Action\", fields=[\"*\"])\n\n\ndef get_link_preview_doctypes():\n\tfrom frappe.utils import cint\n\n\tlink_preview_doctypes = [d.name for d in frappe.get_all(\"DocType\", {\"show_preview_popup\": 1})]\n\tcustomizations = frappe.get_all(\n\t\t\"Property Setter\", fields=[\"doc_type\", \"value\"], filters={\"property\": \"show_preview_popup\"}\n\t)\n\n\tfor custom in customizations:\n\t\tif not cint(custom.value) and custom.doc_type in link_preview_doctypes:\n\t\t\tlink_preview_doctypes.remove(custom.doc_type)\n\t\telse:\n\t\t\tlink_preview_doctypes.append(custom.doc_type)\n\n\treturn link_preview_doctypes\n\n\ndef get_additional_filters_from_hooks():\n\tfilter_config = frappe._dict()\n\tfilter_hooks = frappe.get_hooks(\"filters_config\")\n\tfor hook in filter_hooks:\n\t\tfilter_config.update(frappe.get_attr(hook)())\n\n\treturn filter_config\n\n\ndef add_layouts(bootinfo):\n\t# add routes for readable doctypes\n\tbootinfo.doctype_layouts = frappe.get_all(\"DocType Layout\", [\"name\", \"route\", \"document_type\"])\n\n\ndef get_desk_settings():\n\trole_list = frappe.get_all(\"Role\", fields=[\"*\"], filters=dict(name=[\"in\", frappe.get_roles()]))\n\tdesk_settings = {}\n\n\tfrom frappe.core.doctype.role.role import desk_properties\n\n\tfor role in role_list:\n\t\tfor key in desk_properties:\n\t\t\tdesk_settings[key] = desk_settings.get(key) or role.get(key)\n\n\treturn desk_settings\n\n\ndef get_notification_settings():\n\treturn frappe.get_cached_doc(\"Notification Settings\", frappe.session.user)\n\n\ndef get_link_title_doctypes():\n\tdts = frappe.get_all(\"DocType\", {\"show_title_field_in_link\": 1})\n\tcustom_dts = frappe.get_all(\n\t\t\"Property Setter\",\n\t\t{\"property\": \"show_title_field_in_link\", \"value\": \"1\"},\n\t\t[\"doc_type as name\"],\n\t)\n\treturn [d.name for d in dts + custom_dts if d]\n\n\ndef set_time_zone(bootinfo):\n\tbootinfo.time_zone = {\n\t\t\"system\": get_system_timezone(),\n\t\t\"user\": bootinfo.get(\"user_info\", {}).get(frappe.session.user, {}).get(\"time_zone\", None)\n\t\tor get_system_timezone(),\n\t}\n\n\ndef load_country_doc(bootinfo):\n\tcountry = frappe.db.get_default(\"country\")\n\tif not country:\n\t\treturn\n\ttry:\n\t\tbootinfo.docs.append(frappe.get_cached_doc(\"Country\", country))\n\texcept Exception:\n\t\tpass\n\n\ndef load_currency_docs(bootinfo):\n\tcurrency = frappe.qb.DocType(\"Currency\")\n\n\tcurrency_docs = 
(\n\t\tfrappe.qb.from_(currency)\n\t\t.select(\n\t\t\tcurrency.name,\n\t\t\tcurrency.fraction,\n\t\t\tcurrency.fraction_units,\n\t\t\tcurrency.number_format,\n\t\t\tcurrency.smallest_currency_fraction_value,\n\t\t\tcurrency.symbol,\n\t\t\tcurrency.symbol_on_right,\n\t\t)\n\t\t.where(currency.enabled == 1)\n\t\t.run(as_dict=1, update={\"doctype\": \":Currency\"})\n\t)\n\n\tbootinfo.docs += currency_docs\n\n\ndef get_marketplace_apps():\n\timport requests\n\n\tapps = []\n\tcache_key = \"frappe_marketplace_apps\"\n\n\tif frappe.conf.developer_mode:\n\t\treturn apps\n\n\tdef get_apps_from_fc():\n\t\tremote_site = frappe.conf.frappecloud_url or \"frappecloud.com\"\n\t\trequest_url = f\"https://{remote_site}/api/method/press.api.marketplace.get_marketplace_apps\"\n\t\trequest = requests.get(request_url, timeout=2.0)\n\t\treturn request.json()[\"message\"]\n\n\ttry:\n\t\tapps = frappe.cache().get_value(cache_key, get_apps_from_fc, shared=True)\n\t\tinstalled_apps = set(frappe.get_installed_apps())\n\t\tapps = [app for app in apps if app[\"name\"] not in installed_apps]\n\texcept Exception:\n\t\t# Don't retry for a day\n\t\tfrappe.cache().set_value(cache_key, apps, shared=True, expires_in_sec=24 * 60 * 60)\n\n\treturn apps\n\n\ndef add_subscription_conf():\n\ttry:\n\t\treturn frappe.conf.subscription\n\texcept Exception:\n\t\treturn \"\"\n",
"path": "frappe/boot.py"
}
] | [
{
"content": "# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors\n# License: MIT. See LICENSE\n\"\"\"\nbootstrap client session\n\"\"\"\n\nimport frappe\nimport frappe.defaults\nimport frappe.desk.desk_page\nfrom frappe.core.doctype.navbar_settings.navbar_settings import get_app_logo, get_navbar_settings\nfrom frappe.desk.doctype.form_tour.form_tour import get_onboarding_ui_tours\nfrom frappe.desk.doctype.route_history.route_history import frequently_visited_links\nfrom frappe.desk.form.load import get_meta_bundle\nfrom frappe.email.inbox import get_email_accounts\nfrom frappe.model.base_document import get_controller\nfrom frappe.permissions import has_permission\nfrom frappe.query_builder import DocType\nfrom frappe.query_builder.functions import Count\nfrom frappe.query_builder.terms import ParameterizedValueWrapper, SubQuery\nfrom frappe.social.doctype.energy_point_log.energy_point_log import get_energy_points\nfrom frappe.social.doctype.energy_point_settings.energy_point_settings import (\n\tis_energy_point_enabled,\n)\nfrom frappe.utils import add_user_info, cstr, get_system_timezone\nfrom frappe.utils.change_log import get_versions\nfrom frappe.website.doctype.web_page_view.web_page_view import is_tracking_enabled\n\n\ndef get_bootinfo():\n\t\"\"\"build and return boot info\"\"\"\n\tfrom frappe.translate import get_lang_dict, get_translated_doctypes\n\n\tfrappe.set_user_lang(frappe.session.user)\n\tbootinfo = frappe._dict()\n\thooks = frappe.get_hooks()\n\tdoclist = []\n\n\t# user\n\tget_user(bootinfo)\n\n\t# system info\n\tbootinfo.sitename = frappe.local.site\n\tbootinfo.sysdefaults = frappe.defaults.get_defaults()\n\tbootinfo.server_date = frappe.utils.nowdate()\n\n\tif frappe.session[\"user\"] != \"Guest\":\n\t\tbootinfo.user_info = get_user_info()\n\t\tbootinfo.sid = frappe.session[\"sid\"]\n\n\tbootinfo.modules = {}\n\tbootinfo.module_list = []\n\tload_desktop_data(bootinfo)\n\tbootinfo.letter_heads = get_letter_heads()\n\tbootinfo.active_domains = frappe.get_active_domains()\n\tbootinfo.all_domains = [d.get(\"name\") for d in frappe.get_all(\"Domain\")]\n\tadd_layouts(bootinfo)\n\n\tbootinfo.module_app = frappe.local.module_app\n\tbootinfo.single_types = [d.name for d in frappe.get_all(\"DocType\", {\"issingle\": 1})]\n\tbootinfo.nested_set_doctypes = [\n\t\td.parent for d in frappe.get_all(\"DocField\", {\"fieldname\": \"lft\"}, [\"parent\"])\n\t]\n\tadd_home_page(bootinfo, doclist)\n\tbootinfo.page_info = get_allowed_pages()\n\tload_translations(bootinfo)\n\tadd_timezone_info(bootinfo)\n\tload_conf_settings(bootinfo)\n\tload_print(bootinfo, doclist)\n\tdoclist.extend(get_meta_bundle(\"Page\"))\n\tbootinfo.home_folder = frappe.db.get_value(\"File\", {\"is_home_folder\": 1})\n\tbootinfo.navbar_settings = get_navbar_settings()\n\tbootinfo.notification_settings = get_notification_settings()\n\tbootinfo.onboarding_tours = get_onboarding_ui_tours()\n\tset_time_zone(bootinfo)\n\n\t# ipinfo\n\tif frappe.session.data.get(\"ipinfo\"):\n\t\tbootinfo.ipinfo = frappe.session[\"data\"][\"ipinfo\"]\n\n\t# add docs\n\tbootinfo.docs = doclist\n\tload_country_doc(bootinfo)\n\tload_currency_docs(bootinfo)\n\n\tfor method in hooks.boot_session or []:\n\t\tfrappe.get_attr(method)(bootinfo)\n\n\tif bootinfo.lang:\n\t\tbootinfo.lang = str(bootinfo.lang)\n\tbootinfo.versions = {k: v[\"version\"] for k, v in get_versions().items()}\n\n\tbootinfo.error_report_email = frappe.conf.error_report_email\n\tbootinfo.calendars = sorted(frappe.get_hooks(\"calendars\"))\n\tbootinfo.treeviews = 
frappe.get_hooks(\"treeviews\") or []\n\tbootinfo.lang_dict = get_lang_dict()\n\tbootinfo.success_action = get_success_action()\n\tbootinfo.update(get_email_accounts(user=frappe.session.user))\n\tbootinfo.energy_points_enabled = is_energy_point_enabled()\n\tbootinfo.website_tracking_enabled = is_tracking_enabled()\n\tbootinfo.points = get_energy_points(frappe.session.user)\n\tbootinfo.frequently_visited_links = frequently_visited_links()\n\tbootinfo.link_preview_doctypes = get_link_preview_doctypes()\n\tbootinfo.additional_filters_config = get_additional_filters_from_hooks()\n\tbootinfo.desk_settings = get_desk_settings()\n\tbootinfo.app_logo_url = get_app_logo()\n\tbootinfo.link_title_doctypes = get_link_title_doctypes()\n\tbootinfo.translated_doctypes = get_translated_doctypes()\n\tbootinfo.subscription_conf = add_subscription_conf()\n\tbootinfo.marketplace_apps = get_marketplace_apps()\n\n\treturn bootinfo\n\n\ndef get_letter_heads():\n\tletter_heads = {}\n\tfor letter_head in frappe.get_all(\"Letter Head\", fields=[\"name\", \"content\", \"footer\"]):\n\t\tletter_heads.setdefault(\n\t\t\tletter_head.name, {\"header\": letter_head.content, \"footer\": letter_head.footer}\n\t\t)\n\n\treturn letter_heads\n\n\ndef load_conf_settings(bootinfo):\n\tfrom frappe.core.api.file import get_max_file_size\n\n\tbootinfo.max_file_size = get_max_file_size()\n\tfor key in (\"developer_mode\", \"socketio_port\", \"file_watcher_port\"):\n\t\tif key in frappe.conf:\n\t\t\tbootinfo[key] = frappe.conf.get(key)\n\n\ndef load_desktop_data(bootinfo):\n\tfrom frappe.desk.desktop import get_workspace_sidebar_items\n\n\tbootinfo.allowed_workspaces = get_workspace_sidebar_items().get(\"pages\")\n\tbootinfo.module_wise_workspaces = get_controller(\"Workspace\").get_module_wise_workspaces()\n\tbootinfo.dashboards = frappe.get_all(\"Dashboard\")\n\n\ndef get_allowed_pages(cache=False):\n\treturn get_user_pages_or_reports(\"Page\", cache=cache)\n\n\ndef get_allowed_reports(cache=False):\n\treturn get_user_pages_or_reports(\"Report\", cache=cache)\n\n\ndef get_allowed_report_names(cache=False) -> set[str]:\n\treturn {cstr(report) for report in get_allowed_reports(cache).keys() if report}\n\n\ndef get_user_pages_or_reports(parent, cache=False):\n\tif cache:\n\t\thas_role = frappe.cache.get_value(\"has_role:\" + parent, user=frappe.session.user)\n\t\tif has_role:\n\t\t\treturn has_role\n\n\troles = frappe.get_roles()\n\thas_role = {}\n\n\tpage = DocType(\"Page\")\n\treport = DocType(\"Report\")\n\n\tif parent == \"Report\":\n\t\tcolumns = (report.name.as_(\"title\"), report.ref_doctype, report.report_type)\n\telse:\n\t\tcolumns = (page.title.as_(\"title\"),)\n\n\tcustomRole = DocType(\"Custom Role\")\n\thasRole = DocType(\"Has Role\")\n\tparentTable = DocType(parent)\n\n\t# get pages or reports set on custom role\n\tpages_with_custom_roles = (\n\t\tfrappe.qb.from_(customRole)\n\t\t.from_(hasRole)\n\t\t.from_(parentTable)\n\t\t.select(\n\t\t\tcustomRole[parent.lower()].as_(\"name\"), customRole.modified, customRole.ref_doctype, *columns\n\t\t)\n\t\t.where(\n\t\t\t(hasRole.parent == customRole.name)\n\t\t\t& (parentTable.name == customRole[parent.lower()])\n\t\t\t& (customRole[parent.lower()].isnotnull())\n\t\t\t& (hasRole.role.isin(roles))\n\t\t)\n\t).run(as_dict=True)\n\n\tfor p in pages_with_custom_roles:\n\t\thas_role[p.name] = {\"modified\": p.modified, \"title\": p.title, \"ref_doctype\": p.ref_doctype}\n\n\tsubq = 
(\n\t\tfrappe.qb.from_(customRole)\n\t\t.select(customRole[parent.lower()])\n\t\t.where(customRole[parent.lower()].isnotnull())\n\t)\n\n\tpages_with_standard_roles = (\n\t\tfrappe.qb.from_(hasRole)\n\t\t.from_(parentTable)\n\t\t.select(parentTable.name.as_(\"name\"), parentTable.modified, *columns)\n\t\t.where(\n\t\t\t(hasRole.role.isin(roles))\n\t\t\t& (hasRole.parent == parentTable.name)\n\t\t\t& (parentTable.name.notin(subq))\n\t\t)\n\t\t.distinct()\n\t)\n\n\tif parent == \"Report\":\n\t\tpages_with_standard_roles = pages_with_standard_roles.where(report.disabled == 0)\n\n\tpages_with_standard_roles = pages_with_standard_roles.run(as_dict=True)\n\n\tfor p in pages_with_standard_roles:\n\t\tif p.name not in has_role:\n\t\t\thas_role[p.name] = {\"modified\": p.modified, \"title\": p.title}\n\t\t\tif parent == \"Report\":\n\t\t\t\thas_role[p.name].update({\"ref_doctype\": p.ref_doctype})\n\n\tno_of_roles = SubQuery(\n\t\tfrappe.qb.from_(hasRole).select(Count(\"*\")).where(hasRole.parent == parentTable.name)\n\t)\n\n\t# pages with no role are allowed\n\tif parent == \"Page\":\n\n\t\tpages_with_no_roles = (\n\t\t\tfrappe.qb.from_(parentTable)\n\t\t\t.select(parentTable.name, parentTable.modified, *columns)\n\t\t\t.where(no_of_roles == 0)\n\t\t).run(as_dict=True)\n\n\t\tfor p in pages_with_no_roles:\n\t\t\tif p.name not in has_role:\n\t\t\t\thas_role[p.name] = {\"modified\": p.modified, \"title\": p.title}\n\n\telif parent == \"Report\":\n\t\tif not has_permission(\"Report\", raise_exception=False):\n\t\t\treturn {}\n\n\t\treports = frappe.get_list(\n\t\t\t\"Report\",\n\t\t\tfields=[\"name\", \"report_type\"],\n\t\t\tfilters={\"name\": (\"in\", has_role.keys())},\n\t\t\tignore_ifnull=True,\n\t\t)\n\t\tfor report in reports:\n\t\t\thas_role[report.name][\"report_type\"] = report.report_type\n\n\t\tnon_permitted_reports = set(has_role.keys()) - {r.name for r in reports}\n\t\tfor r in non_permitted_reports:\n\t\t\thas_role.pop(r, None)\n\n\t# Expire every six hours\n\tfrappe.cache.set_value(\"has_role:\" + parent, has_role, frappe.session.user, 21600)\n\treturn has_role\n\n\ndef load_translations(bootinfo):\n\tfrom frappe.translate import get_messages_for_boot\n\n\tbootinfo[\"lang\"] = frappe.lang\n\tbootinfo[\"__messages\"] = get_messages_for_boot()\n\n\ndef get_user_info():\n\t# get info for current user\n\tuser_info = frappe._dict()\n\tadd_user_info(frappe.session.user, user_info)\n\n\treturn user_info\n\n\ndef get_user(bootinfo):\n\t\"\"\"get user info\"\"\"\n\tbootinfo.user = frappe.get_user().load_user()\n\n\ndef add_home_page(bootinfo, docs):\n\t\"\"\"load home page\"\"\"\n\tif frappe.session.user == \"Guest\":\n\t\treturn\n\thome_page = frappe.db.get_default(\"desktop:home_page\")\n\n\tif home_page == \"setup-wizard\":\n\t\tbootinfo.setup_wizard_requires = frappe.get_hooks(\"setup_wizard_requires\")\n\n\ttry:\n\t\tpage = frappe.desk.desk_page.get(home_page)\n\t\tdocs.append(page)\n\t\tbootinfo[\"home_page\"] = page.name\n\texcept (frappe.DoesNotExistError, frappe.PermissionError):\n\t\tfrappe.clear_last_message()\n\t\tbootinfo[\"home_page\"] = \"Workspaces\"\n\n\ndef add_timezone_info(bootinfo):\n\tsystem = bootinfo.sysdefaults.get(\"time_zone\")\n\timport frappe.utils.momentjs\n\n\tbootinfo.timezone_info = {\"zones\": {}, \"rules\": {}, \"links\": {}}\n\tfrappe.utils.momentjs.update(system, bootinfo.timezone_info)\n\n\ndef load_print(bootinfo, doclist):\n\tprint_settings = frappe.db.get_singles_dict(\"Print Settings\")\n\tprint_settings.doctype = \":Print 
Settings\"\n\tdoclist.append(print_settings)\n\tload_print_css(bootinfo, print_settings)\n\n\ndef load_print_css(bootinfo, print_settings):\n\timport frappe.www.printview\n\n\tbootinfo.print_css = frappe.www.printview.get_print_style(\n\t\tprint_settings.print_style or \"Redesign\", for_legacy=True\n\t)\n\n\ndef get_unseen_notes():\n\tnote = DocType(\"Note\")\n\tnsb = DocType(\"Note Seen By\").as_(\"nsb\")\n\n\treturn (\n\t\tfrappe.qb.from_(note)\n\t\t.select(note.name, note.title, note.content, note.notify_on_every_login)\n\t\t.where(\n\t\t\t(note.notify_on_login == 1)\n\t\t\t& (note.expire_notification_on > frappe.utils.now())\n\t\t\t& (\n\t\t\t\tParameterizedValueWrapper(frappe.session.user).notin(\n\t\t\t\t\tSubQuery(frappe.qb.from_(nsb).select(nsb.user).where(nsb.parent == note.name))\n\t\t\t\t)\n\t\t\t)\n\t\t)\n\t).run(as_dict=1)\n\n\ndef get_success_action():\n\treturn frappe.get_all(\"Success Action\", fields=[\"*\"])\n\n\ndef get_link_preview_doctypes():\n\tfrom frappe.utils import cint\n\n\tlink_preview_doctypes = [d.name for d in frappe.get_all(\"DocType\", {\"show_preview_popup\": 1})]\n\tcustomizations = frappe.get_all(\n\t\t\"Property Setter\", fields=[\"doc_type\", \"value\"], filters={\"property\": \"show_preview_popup\"}\n\t)\n\n\tfor custom in customizations:\n\t\tif not cint(custom.value) and custom.doc_type in link_preview_doctypes:\n\t\t\tlink_preview_doctypes.remove(custom.doc_type)\n\t\telse:\n\t\t\tlink_preview_doctypes.append(custom.doc_type)\n\n\treturn link_preview_doctypes\n\n\ndef get_additional_filters_from_hooks():\n\tfilter_config = frappe._dict()\n\tfilter_hooks = frappe.get_hooks(\"filters_config\")\n\tfor hook in filter_hooks:\n\t\tfilter_config.update(frappe.get_attr(hook)())\n\n\treturn filter_config\n\n\ndef add_layouts(bootinfo):\n\t# add routes for readable doctypes\n\tbootinfo.doctype_layouts = frappe.get_all(\"DocType Layout\", [\"name\", \"route\", \"document_type\"])\n\n\ndef get_desk_settings():\n\trole_list = frappe.get_all(\"Role\", fields=[\"*\"], filters=dict(name=[\"in\", frappe.get_roles()]))\n\tdesk_settings = {}\n\n\tfrom frappe.core.doctype.role.role import desk_properties\n\n\tfor role in role_list:\n\t\tfor key in desk_properties:\n\t\t\tdesk_settings[key] = desk_settings.get(key) or role.get(key)\n\n\treturn desk_settings\n\n\ndef get_notification_settings():\n\treturn frappe.get_cached_doc(\"Notification Settings\", frappe.session.user)\n\n\ndef get_link_title_doctypes():\n\tdts = frappe.get_all(\"DocType\", {\"show_title_field_in_link\": 1})\n\tcustom_dts = frappe.get_all(\n\t\t\"Property Setter\",\n\t\t{\"property\": \"show_title_field_in_link\", \"value\": \"1\"},\n\t\t[\"doc_type as name\"],\n\t)\n\treturn [d.name for d in dts + custom_dts if d]\n\n\ndef set_time_zone(bootinfo):\n\tbootinfo.time_zone = {\n\t\t\"system\": get_system_timezone(),\n\t\t\"user\": bootinfo.get(\"user_info\", {}).get(frappe.session.user, {}).get(\"time_zone\", None)\n\t\tor get_system_timezone(),\n\t}\n\n\ndef load_country_doc(bootinfo):\n\tcountry = frappe.db.get_default(\"country\")\n\tif not country:\n\t\treturn\n\ttry:\n\t\tbootinfo.docs.append(frappe.get_cached_doc(\"Country\", country))\n\texcept Exception:\n\t\tpass\n\n\ndef load_currency_docs(bootinfo):\n\tcurrency = frappe.qb.DocType(\"Currency\")\n\n\tcurrency_docs = 
(\n\t\tfrappe.qb.from_(currency)\n\t\t.select(\n\t\t\tcurrency.name,\n\t\t\tcurrency.fraction,\n\t\t\tcurrency.fraction_units,\n\t\t\tcurrency.number_format,\n\t\t\tcurrency.smallest_currency_fraction_value,\n\t\t\tcurrency.symbol,\n\t\t\tcurrency.symbol_on_right,\n\t\t)\n\t\t.where(currency.enabled == 1)\n\t\t.run(as_dict=1, update={\"doctype\": \":Currency\"})\n\t)\n\n\tbootinfo.docs += currency_docs\n\n\ndef get_marketplace_apps():\n\timport requests\n\n\tapps = []\n\tcache_key = \"frappe_marketplace_apps\"\n\n\tif frappe.conf.developer_mode:\n\t\treturn apps\n\n\tdef get_apps_from_fc():\n\t\tremote_site = frappe.conf.frappecloud_url or \"frappecloud.com\"\n\t\trequest_url = f\"https://{remote_site}/api/method/press.api.marketplace.get_marketplace_apps\"\n\t\trequest = requests.get(request_url, timeout=2.0)\n\t\treturn request.json()[\"message\"]\n\n\ttry:\n\t\tapps = frappe.cache().get_value(cache_key, get_apps_from_fc, shared=True)\n\t\tinstalled_apps = set(frappe.get_installed_apps())\n\t\tapps = [app for app in apps if app[\"name\"] not in installed_apps]\n\texcept Exception:\n\t\t# Don't retry for a day\n\t\tfrappe.cache().set_value(cache_key, apps, shared=True, expires_in_sec=24 * 60 * 60)\n\n\treturn apps\n\n\ndef add_subscription_conf():\n\ttry:\n\t\treturn frappe.conf.subscription\n\texcept Exception:\n\t\treturn \"\"\n",
"path": "frappe/boot.py"
}
] | diff --git a/frappe/boot.py b/frappe/boot.py
index 2ce950a55a7a..d0e6204e78a4 100644
--- a/frappe/boot.py
+++ b/frappe/boot.py
@@ -270,9 +270,6 @@ def get_user_info():
user_info = frappe._dict()
add_user_info(frappe.session.user, user_info)
- if frappe.session.user == "Administrator" and user_info.Administrator.email:
- user_info[user_info.Administrator.email] = user_info.Administrator
-
return user_info
|
pymedusa__Medusa-6527 | Parse Error During Postprocessor Email Notification
**Describe the bug**
Whenever I download "Jeopardy!", Medusa snatches the show and postprocesses it correctly EXCEPT that it always generates this warning:
> WARNING POSTPROCESSOR :: [f46bfac] Unable to parse "Jeopardy! - 2019 04 15 - Returning Champion Vs. , show # 7971 - 720p HDTV" for email notification
I suspect there's something in the show name string that it doesn't like (see the reproduction sketch after the version info below). This is an uncommon string, with several unusual characters (such as the exclamation mark, pound sign, period, and comma), and this doesn't happen with any other show.
Normally, this wouldn't be a big deal, but it's annoying because the show airs every weekday, so it happens five times a week, 46 weeks a year.
**Medusa (please complete the following information):**
- OS: Windows-7-6.1.7601-SP1
- Branch: master
- Commit: f46bfacf8763204fbde4f26a5916095371d494d1
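A reproduction sketch using only the `name_pattern` regex from `medusa/notifiers/emailnotify.py` (copied verbatim below) suggests the culprit is the space-separated air date rather than the punctuation: the `ep_id` alternation only accepts hyphenated dates (`\d{4}-\d{2}-\d{2}`), while the pretty name renders air-by-date episodes as `2019 04 15`.

```python
import re

# name_pattern as currently defined in medusa/notifiers/emailnotify.py
name_pattern = re.compile(
    r'(?P<show>.+?) - '
    r'(?P<ep_id>S?\d+[Ex]\d+( - \d{3})?|\d{3}|\d{4}-\d{2}-\d{2}) - '
    r'(?P<episode>.*)'
)

# The air date is rendered with spaces, so no ep_id alternative can match;
# _parse_name() then takes the fallback path and logs the "Unable to parse" warning.
ep_name = 'Jeopardy! - 2019 04 15 - Returning Champion Vs. , show # 7971 - 720p HDTV'
print(name_pattern.match(ep_name))  # None
```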
| [
{
"content": "# coding=utf-8\n\"\"\"Email notifier module.\"\"\"\n\nfrom __future__ import unicode_literals\n\nimport ast\nimport logging\nimport re\nimport smtplib\nfrom builtins import object\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.text import MIMEText\nfrom email.utils import formatdate\n\nfrom medusa import app, db\nfrom medusa.common import (\n NOTIFY_DOWNLOAD,\n NOTIFY_GIT_UPDATE,\n NOTIFY_LOGIN,\n NOTIFY_SUBTITLE_DOWNLOAD,\n notifyStrings,\n)\nfrom medusa.logger.adapters.style import BraceAdapter\n\nlog = BraceAdapter(logging.getLogger(__name__))\nlog.logger.addHandler(logging.NullHandler())\n\n\nclass Notifier(object):\n \"\"\"\n Email notifier class.\n\n Possible patterns for the `ep_name` input:\n Downloaded/Snatched:\n %SN - %Sx%0E - %EN - %QN\n %SN - %Sx%0E - %AB - %EN - %QN\n Subtitle Downloaded:\n %SN - %AB - %EN\n %SN - %AD - %EN\n %SN - S%0SE%0E - %EN\n \"\"\"\n\n name_pattern = re.compile(\n r'(?P<show>.+?) - '\n r'(?P<ep_id>S?\\d+[Ex]\\d+( - \\d{3})?|\\d{3}|\\d{4}-\\d{2}-\\d{2}) - '\n r'(?P<episode>.*)'\n )\n\n def __init__(self):\n self.last_err = None\n\n def test_notify(self, host, port, smtp_from, use_tls, user, pwd, to):\n \"\"\"\n Send a test notification.\n\n :return: True for no issue or False if there was an error\n \"\"\"\n msg = MIMEText('This is a test message from Medusa. If you\\'re reading this, the test succeeded.')\n if app.EMAIL_SUBJECT:\n msg['Subject'] = '[TEST] {0}'.format(app.EMAIL_SUBJECT)\n else:\n msg['Subject'] = 'Medusa: Test Message'\n msg['From'] = smtp_from\n msg['To'] = to\n msg['Date'] = formatdate(localtime=True)\n return self._sendmail(host, port, smtp_from, use_tls, user, pwd, [to], msg, True)\n\n def notify_snatch(self, title, message):\n \"\"\"\n Send a notification that an episode was snatched.\n\n ep_name: The name of the episode that was snatched\n \"\"\"\n if app.USE_EMAIL and app.EMAIL_NOTIFY_ONSNATCH:\n parsed = self._parse_name(message)\n to = self._generate_recipients(parsed['show'])\n if not to:\n log.debug('Skipping email notify because there are no configured recipients')\n else:\n try:\n msg = MIMEMultipart('alternative')\n msg.attach(MIMEText(\n '<body style=\"font-family:Helvetica, Arial, sans-serif;\">'\n '<h3>Medusa Notification - Snatched</h3><br>'\n '<p>Show: <b>{show}</b></p><br>'\n '<p>Episode: <b>{ep_id}{episode}</b></p><br><br>'\n '<footer style=\"margin-top: 2.5em; padding: .7em 0; '\n 'color: #777; border-top: #BBB solid 1px;\">'\n 'Powered by Medusa.</footer></body>'.format(\n show=parsed['show'],\n ep_id=(parsed['ep_id'] + ' - ') if 'ep_id' in parsed else '',\n episode=parsed['episode']\n ),\n 'html'))\n\n except Exception:\n try:\n msg = MIMEText(message)\n except Exception:\n msg = MIMEText(title)\n\n if app.EMAIL_SUBJECT:\n msg['Subject'] = '{0}: {1}'.format(title, app.EMAIL_SUBJECT)\n else:\n msg['Subject'] = '{0}: {1}'.format(title, message)\n msg['From'] = app.EMAIL_FROM\n msg['To'] = ','.join(to)\n msg['Date'] = formatdate(localtime=True)\n\n if self._sendmail(app.EMAIL_HOST, app.EMAIL_PORT, app.EMAIL_FROM, app.EMAIL_TLS,\n app.EMAIL_USER, app.EMAIL_PASSWORD, to, msg):\n log.debug('Snatch notification sent to {recipient} for {episode}',\n {'recipient': to, 'episode': message})\n else:\n log.warning('Snatch notification error: {0}', self.last_err)\n\n def notify_download(self, ep_obj, title='Completed:'):\n \"\"\"\n Send a notification that an episode was downloaded.\n\n ep_name: The name of the episode that was downloaded\n title: The title of the notification (optional)\n 
\"\"\"\n if app.USE_EMAIL and app.EMAIL_NOTIFY_ONDOWNLOAD:\n title = notifyStrings[NOTIFY_DOWNLOAD]\n ep_name = ep_obj.pretty_name_with_quality()\n\n parsed = self._parse_name(ep_name)\n to = self._generate_recipients(parsed['show'])\n if not to:\n log.debug('Skipping email notify because there are no configured recipients')\n else:\n try:\n msg = MIMEMultipart('alternative')\n msg.attach(MIMEText(\n '<body style=\"font-family:Helvetica, Arial, sans-serif;\">'\n '<h3>Medusa Notification - Downloaded</h3><br>'\n '<p>Show: <b>{show}</b></p><br>'\n '<p>Episode: <b>{ep_id}{episode}</b></p><br><br>'\n '<footer style=\"margin-top: 2.5em; padding: .7em 0; '\n 'color: #777; border-top: #BBB solid 1px;\">'\n 'Powered by Medusa.</footer></body>'.format(\n show=parsed['show'],\n ep_id=(parsed['ep_id'] + ' - ') if 'ep_id' in parsed else '',\n episode=parsed['episode']\n ),\n 'html'))\n\n except Exception:\n try:\n msg = MIMEText(ep_name)\n except Exception:\n msg = MIMEText(title)\n\n if app.EMAIL_SUBJECT:\n msg['Subject'] = '{0}: {1}'.format(title, app.EMAIL_SUBJECT)\n else:\n msg['Subject'] = '{0}: {1}'.format(title, ep_name)\n msg['From'] = app.EMAIL_FROM\n msg['To'] = ','.join(to)\n msg['Date'] = formatdate(localtime=True)\n\n if self._sendmail(app.EMAIL_HOST, app.EMAIL_PORT, app.EMAIL_FROM, app.EMAIL_TLS,\n app.EMAIL_USER, app.EMAIL_PASSWORD, to, msg):\n log.debug('Download notification sent to {recipient} for {episode}',\n {'recipient': to, 'episode': ep_name})\n else:\n log.warning('Download notification error: {0}', self.last_err)\n\n def notify_subtitle_download(self, ep_obj, lang):\n \"\"\"\n Send a notification that a subtitle was downloaded.\n\n ep_name: The name of the episode that was downloaded\n lang: Subtitle language wanted\n \"\"\"\n if app.USE_EMAIL and app.EMAIL_NOTIFY_ONSUBTITLEDOWNLOAD:\n title = notifyStrings[NOTIFY_SUBTITLE_DOWNLOAD]\n ep_name = ep_obj.pretty_name()\n\n parsed = self._parse_name(ep_name)\n to = self._generate_recipients(parsed['show'])\n if not to:\n log.debug('Skipping email notify because there are no configured recipients')\n else:\n try:\n msg = MIMEMultipart('alternative')\n msg.attach(MIMEText(\n '<body style=\"font-family:Helvetica, Arial, sans-serif;\">'\n '<h3>Medusa Notification - Subtitle Downloaded</h3><br>'\n '<p>Show: <b>{show}</b></p><br>'\n '<p>Episode: <b>{ep_id}{episode}</b></p><br>'\n '<p>Language: <b>{lang}</b></p><br><br>'\n '<footer style=\"margin-top: 2.5em; padding: .7em 0; '\n 'color: #777; border-top: #BBB solid 1px;\">'\n 'Powered by Medusa.</footer></body>'.format(\n show=parsed['show'],\n ep_id=(parsed['ep_id'] + ' - ') if 'ep_id' in parsed else '',\n episode=parsed['episode'],\n lang=lang\n ),\n 'html'))\n except Exception:\n try:\n msg = MIMEText('{0}: {1}'.format(ep_name, lang))\n except Exception:\n msg = MIMEText(title)\n\n if app.EMAIL_SUBJECT:\n msg['Subject'] = '{0} [{1}]: {2}'.format(title, lang, app.EMAIL_SUBJECT)\n else:\n msg['Subject'] = '{0} [{1}]: {2}'.format(title, lang, ep_name)\n msg['From'] = app.EMAIL_FROM\n msg['To'] = ','.join(to)\n\n if self._sendmail(app.EMAIL_HOST, app.EMAIL_PORT, app.EMAIL_FROM, app.EMAIL_TLS,\n app.EMAIL_USER, app.EMAIL_PASSWORD, to, msg):\n log.debug('Download notification sent to {recipient} for {episode}',\n {'recipient': to, 'episode': ep_name})\n else:\n log.warning('Download notification error: {0}', self.last_err)\n\n def notify_git_update(self, new_version='??'):\n \"\"\"\n Send a notification that Medusa was updated.\n\n new_version: The commit Medusa was updated to\n \"\"\"\n if 
app.USE_EMAIL:\n title = notifyStrings[NOTIFY_GIT_UPDATE]\n to = self._generate_recipients(None)\n if not to:\n log.debug('Skipping email notify because there are no configured recipients')\n else:\n try:\n msg = MIMEMultipart('alternative')\n msg.attach(MIMEText(\n '<body style=\"font-family:Helvetica, Arial, sans-serif;\">'\n '<h3>Medusa Notification - Updated</h3><br>'\n '<p>Commit: <b>{0}</b></p><br><br>'\n '<footer style=\"margin-top: 2.5em; padding: .7em 0; '\n 'color: #777; border-top: #BBB solid 1px;\">'\n 'Powered by Medusa.</footer></body>'.format\n (new_version), 'html'))\n\n except Exception:\n try:\n msg = MIMEText(new_version)\n except Exception:\n msg = MIMEText(title)\n\n msg['Subject'] = '{0}: {1}'.format(title, new_version)\n msg['From'] = app.EMAIL_FROM\n msg['To'] = ','.join(to)\n msg['Date'] = formatdate(localtime=True)\n\n if self._sendmail(app.EMAIL_HOST, app.EMAIL_PORT, app.EMAIL_FROM, app.EMAIL_TLS,\n app.EMAIL_USER, app.EMAIL_PASSWORD, to, msg):\n log.debug('Update notification sent to {recipient}',\n {'recipient': to})\n else:\n log.warning('Update notification error: {0}', self.last_err)\n\n def notify_login(self, ipaddress=''):\n \"\"\"\n Send a notification that Medusa was logged into remotely.\n\n ipaddress: The ip Medusa was logged into from\n \"\"\"\n if app.USE_EMAIL:\n title = notifyStrings[NOTIFY_LOGIN]\n to = self._generate_recipients(None)\n if not to:\n log.debug('Skipping email notify because there are no configured recipients')\n else:\n try:\n msg = MIMEMultipart('alternative')\n msg.attach(MIMEText(\n '<body style=\"font-family:Helvetica, Arial, sans-serif;\">'\n '<h3>Medusa Notification - Remote Login</h3><br>'\n '<p>New login from IP: <a href=\"http://geomaplookup.net/?ip={0}\">{0}</a>.<br><br>'\n '<footer style=\"margin-top: 2.5em; padding: .7em 0; '\n 'color: #777; border-top: #BBB solid 1px;\">'\n 'Powered by Medusa.</footer></body>'.format\n (ipaddress), 'html'))\n\n except Exception:\n try:\n msg = MIMEText(ipaddress)\n except Exception:\n msg = MIMEText(title)\n\n msg['Subject'] = '{0}: {1}'.format(title, ipaddress)\n msg['From'] = app.EMAIL_FROM\n msg['To'] = ','.join(to)\n msg['Date'] = formatdate(localtime=True)\n\n if self._sendmail(app.EMAIL_HOST, app.EMAIL_PORT, app.EMAIL_FROM, app.EMAIL_TLS,\n app.EMAIL_USER, app.EMAIL_PASSWORD, to, msg):\n log.debug('Login notification sent to {recipient}', {'recipient': to})\n else:\n log.warning('Login notification error: {0}', self.last_err)\n\n @staticmethod\n def _generate_recipients(show):\n addrs = []\n main_db_con = db.DBConnection()\n\n # Grab the global recipients\n if app.EMAIL_LIST:\n addrs.extend(\n addr for addr in app.EMAIL_LIST\n if addr.strip()\n )\n\n # Grab the per-show-notification recipients\n if show:\n sql_results = main_db_con.select(\n 'SELECT notify_list '\n 'FROM tv_shows '\n 'WHERE show_name = ?',\n [show]\n )\n for row in sql_results:\n notify_list = row['notify_list']\n if not notify_list:\n continue\n\n if notify_list[0] == '{':\n entries = dict(ast.literal_eval(notify_list))\n notify_list = entries['emails']\n\n addrs.extend(\n addr for addr in notify_list.split(',')\n if addr.strip()\n )\n\n addrs = set(addrs)\n log.debug('Notification recipients: {0}', addrs)\n return addrs\n\n def _sendmail(self, host, port, smtp_from, use_tls, user, pwd, to, msg, smtp_debug=False):\n log.debug(\n 'HOST: {host}; PORT: {port}; FROM: {sender}, TLS: {tls},'\n ' USER: {user}, PWD: {password}, TO: {recipient}', {\n 'host': host,\n 'port': port,\n 'sender': smtp_from,\n 'tls': 
use_tls,\n 'user': user,\n 'password': pwd,\n 'recipient': to,\n }\n )\n try:\n srv = smtplib.SMTP(host, int(port))\n except Exception as error:\n log.warning('Exception generated while sending e-mail: {0}', error)\n # logger.log(traceback.format_exc(), logger.DEBUG)\n self.last_err = '{0}'.format(error)\n return False\n\n if smtp_debug:\n srv.set_debuglevel(1)\n try:\n if use_tls in ('1', True) or (user and pwd):\n log.debug('Sending initial EHLO command!')\n srv.ehlo()\n if use_tls in ('1', True):\n log.debug('Sending STARTTLS command!')\n srv.starttls()\n srv.ehlo()\n if user and pwd:\n log.debug('Sending LOGIN command!')\n srv.login(user, pwd)\n\n srv.sendmail(smtp_from, to, msg.as_string())\n srv.quit()\n return True\n except Exception as error:\n self.last_err = '{0}'.format(error)\n return False\n\n @classmethod\n def _parse_name(cls, ep_name):\n # @TODO: Prone to issues, best solution is to have a dictionary passed to notifiers\n match = cls.name_pattern.match(ep_name)\n\n # Fallback\n if not match:\n # @TODO: This won't be needed when notifiers receive a dictionary\n log.warning('Unable to parse \"{0}\" for email notification', ep_name)\n titles = ep_name.split(' - ')\n return {\n 'show': titles[0],\n 'episode': ' - '.join(titles[1:])\n }\n\n result = match.groupdict()\n\n log.debug('Email notifier parsed \"{0}\" into {1!r}',\n ep_name, result)\n\n return result\n",
"path": "medusa/notifiers/emailnotify.py"
}
] | [
{
"content": "# coding=utf-8\n\"\"\"Email notifier module.\"\"\"\n\nfrom __future__ import unicode_literals\n\nimport ast\nimport logging\nimport re\nimport smtplib\nfrom builtins import object\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.text import MIMEText\nfrom email.utils import formatdate\n\nfrom medusa import app, db\nfrom medusa.common import (\n NOTIFY_DOWNLOAD,\n NOTIFY_GIT_UPDATE,\n NOTIFY_LOGIN,\n NOTIFY_SUBTITLE_DOWNLOAD,\n notifyStrings,\n)\nfrom medusa.logger.adapters.style import BraceAdapter\n\nlog = BraceAdapter(logging.getLogger(__name__))\nlog.logger.addHandler(logging.NullHandler())\n\n\nclass Notifier(object):\n \"\"\"\n Email notifier class.\n\n Possible patterns for the `ep_name` input:\n Downloaded/Snatched:\n %SN - %Sx%0E - %EN - %QN\n %SN - %Sx%0E - %AB - %EN - %QN\n Subtitle Downloaded:\n %SN - %AB - %EN\n %SN - %AD - %EN\n %SN - S%0SE%0E - %EN\n \"\"\"\n\n name_pattern = re.compile(\n r'(?P<show>.+?) - '\n r'(?P<ep_id>S?\\d+[Ex]\\d+( - \\d{3})?|\\d{3}|\\d{4} \\d{2} \\d{2}) - '\n r'(?P<episode>.*)'\n )\n\n def __init__(self):\n self.last_err = None\n\n def test_notify(self, host, port, smtp_from, use_tls, user, pwd, to):\n \"\"\"\n Send a test notification.\n\n :return: True for no issue or False if there was an error\n \"\"\"\n msg = MIMEText('This is a test message from Medusa. If you\\'re reading this, the test succeeded.')\n if app.EMAIL_SUBJECT:\n msg['Subject'] = '[TEST] {0}'.format(app.EMAIL_SUBJECT)\n else:\n msg['Subject'] = 'Medusa: Test Message'\n msg['From'] = smtp_from\n msg['To'] = to\n msg['Date'] = formatdate(localtime=True)\n return self._sendmail(host, port, smtp_from, use_tls, user, pwd, [to], msg, True)\n\n def notify_snatch(self, title, message):\n \"\"\"\n Send a notification that an episode was snatched.\n\n ep_name: The name of the episode that was snatched\n \"\"\"\n if app.USE_EMAIL and app.EMAIL_NOTIFY_ONSNATCH:\n parsed = self._parse_name(message)\n to = self._generate_recipients(parsed['show'])\n if not to:\n log.debug('Skipping email notify because there are no configured recipients')\n else:\n try:\n msg = MIMEMultipart('alternative')\n msg.attach(MIMEText(\n '<body style=\"font-family:Helvetica, Arial, sans-serif;\">'\n '<h3>Medusa Notification - Snatched</h3><br>'\n '<p>Show: <b>{show}</b></p><br>'\n '<p>Episode: <b>{ep_id}{episode}</b></p><br><br>'\n '<footer style=\"margin-top: 2.5em; padding: .7em 0; '\n 'color: #777; border-top: #BBB solid 1px;\">'\n 'Powered by Medusa.</footer></body>'.format(\n show=parsed['show'],\n ep_id=(parsed['ep_id'] + ' - ') if 'ep_id' in parsed else '',\n episode=parsed['episode']\n ),\n 'html'))\n\n except Exception:\n try:\n msg = MIMEText(message)\n except Exception:\n msg = MIMEText(title)\n\n if app.EMAIL_SUBJECT:\n msg['Subject'] = '{0}: {1}'.format(title, app.EMAIL_SUBJECT)\n else:\n msg['Subject'] = '{0}: {1}'.format(title, message)\n msg['From'] = app.EMAIL_FROM\n msg['To'] = ','.join(to)\n msg['Date'] = formatdate(localtime=True)\n\n if self._sendmail(app.EMAIL_HOST, app.EMAIL_PORT, app.EMAIL_FROM, app.EMAIL_TLS,\n app.EMAIL_USER, app.EMAIL_PASSWORD, to, msg):\n log.debug('Snatch notification sent to {recipient} for {episode}',\n {'recipient': to, 'episode': message})\n else:\n log.warning('Snatch notification error: {0}', self.last_err)\n\n def notify_download(self, ep_obj, title='Completed:'):\n \"\"\"\n Send a notification that an episode was downloaded.\n\n ep_name: The name of the episode that was downloaded\n title: The title of the notification (optional)\n 
\"\"\"\n if app.USE_EMAIL and app.EMAIL_NOTIFY_ONDOWNLOAD:\n title = notifyStrings[NOTIFY_DOWNLOAD]\n ep_name = ep_obj.pretty_name_with_quality()\n\n parsed = self._parse_name(ep_name)\n to = self._generate_recipients(parsed['show'])\n if not to:\n log.debug('Skipping email notify because there are no configured recipients')\n else:\n try:\n msg = MIMEMultipart('alternative')\n msg.attach(MIMEText(\n '<body style=\"font-family:Helvetica, Arial, sans-serif;\">'\n '<h3>Medusa Notification - Downloaded</h3><br>'\n '<p>Show: <b>{show}</b></p><br>'\n '<p>Episode: <b>{ep_id}{episode}</b></p><br><br>'\n '<footer style=\"margin-top: 2.5em; padding: .7em 0; '\n 'color: #777; border-top: #BBB solid 1px;\">'\n 'Powered by Medusa.</footer></body>'.format(\n show=parsed['show'],\n ep_id=(parsed['ep_id'] + ' - ') if 'ep_id' in parsed else '',\n episode=parsed['episode']\n ),\n 'html'))\n\n except Exception:\n try:\n msg = MIMEText(ep_name)\n except Exception:\n msg = MIMEText(title)\n\n if app.EMAIL_SUBJECT:\n msg['Subject'] = '{0}: {1}'.format(title, app.EMAIL_SUBJECT)\n else:\n msg['Subject'] = '{0}: {1}'.format(title, ep_name)\n msg['From'] = app.EMAIL_FROM\n msg['To'] = ','.join(to)\n msg['Date'] = formatdate(localtime=True)\n\n if self._sendmail(app.EMAIL_HOST, app.EMAIL_PORT, app.EMAIL_FROM, app.EMAIL_TLS,\n app.EMAIL_USER, app.EMAIL_PASSWORD, to, msg):\n log.debug('Download notification sent to {recipient} for {episode}',\n {'recipient': to, 'episode': ep_name})\n else:\n log.warning('Download notification error: {0}', self.last_err)\n\n def notify_subtitle_download(self, ep_obj, lang):\n \"\"\"\n Send a notification that a subtitle was downloaded.\n\n ep_name: The name of the episode that was downloaded\n lang: Subtitle language wanted\n \"\"\"\n if app.USE_EMAIL and app.EMAIL_NOTIFY_ONSUBTITLEDOWNLOAD:\n title = notifyStrings[NOTIFY_SUBTITLE_DOWNLOAD]\n ep_name = ep_obj.pretty_name()\n\n parsed = self._parse_name(ep_name)\n to = self._generate_recipients(parsed['show'])\n if not to:\n log.debug('Skipping email notify because there are no configured recipients')\n else:\n try:\n msg = MIMEMultipart('alternative')\n msg.attach(MIMEText(\n '<body style=\"font-family:Helvetica, Arial, sans-serif;\">'\n '<h3>Medusa Notification - Subtitle Downloaded</h3><br>'\n '<p>Show: <b>{show}</b></p><br>'\n '<p>Episode: <b>{ep_id}{episode}</b></p><br>'\n '<p>Language: <b>{lang}</b></p><br><br>'\n '<footer style=\"margin-top: 2.5em; padding: .7em 0; '\n 'color: #777; border-top: #BBB solid 1px;\">'\n 'Powered by Medusa.</footer></body>'.format(\n show=parsed['show'],\n ep_id=(parsed['ep_id'] + ' - ') if 'ep_id' in parsed else '',\n episode=parsed['episode'],\n lang=lang\n ),\n 'html'))\n except Exception:\n try:\n msg = MIMEText('{0}: {1}'.format(ep_name, lang))\n except Exception:\n msg = MIMEText(title)\n\n if app.EMAIL_SUBJECT:\n msg['Subject'] = '{0} [{1}]: {2}'.format(title, lang, app.EMAIL_SUBJECT)\n else:\n msg['Subject'] = '{0} [{1}]: {2}'.format(title, lang, ep_name)\n msg['From'] = app.EMAIL_FROM\n msg['To'] = ','.join(to)\n\n if self._sendmail(app.EMAIL_HOST, app.EMAIL_PORT, app.EMAIL_FROM, app.EMAIL_TLS,\n app.EMAIL_USER, app.EMAIL_PASSWORD, to, msg):\n log.debug('Download notification sent to {recipient} for {episode}',\n {'recipient': to, 'episode': ep_name})\n else:\n log.warning('Download notification error: {0}', self.last_err)\n\n def notify_git_update(self, new_version='??'):\n \"\"\"\n Send a notification that Medusa was updated.\n\n new_version: The commit Medusa was updated to\n \"\"\"\n if 
app.USE_EMAIL:\n title = notifyStrings[NOTIFY_GIT_UPDATE]\n to = self._generate_recipients(None)\n if not to:\n log.debug('Skipping email notify because there are no configured recipients')\n else:\n try:\n msg = MIMEMultipart('alternative')\n msg.attach(MIMEText(\n '<body style=\"font-family:Helvetica, Arial, sans-serif;\">'\n '<h3>Medusa Notification - Updated</h3><br>'\n '<p>Commit: <b>{0}</b></p><br><br>'\n '<footer style=\"margin-top: 2.5em; padding: .7em 0; '\n 'color: #777; border-top: #BBB solid 1px;\">'\n 'Powered by Medusa.</footer></body>'.format\n (new_version), 'html'))\n\n except Exception:\n try:\n msg = MIMEText(new_version)\n except Exception:\n msg = MIMEText(title)\n\n msg['Subject'] = '{0}: {1}'.format(title, new_version)\n msg['From'] = app.EMAIL_FROM\n msg['To'] = ','.join(to)\n msg['Date'] = formatdate(localtime=True)\n\n if self._sendmail(app.EMAIL_HOST, app.EMAIL_PORT, app.EMAIL_FROM, app.EMAIL_TLS,\n app.EMAIL_USER, app.EMAIL_PASSWORD, to, msg):\n log.debug('Update notification sent to {recipient}',\n {'recipient': to})\n else:\n log.warning('Update notification error: {0}', self.last_err)\n\n def notify_login(self, ipaddress=''):\n \"\"\"\n Send a notification that Medusa was logged into remotely.\n\n ipaddress: The ip Medusa was logged into from\n \"\"\"\n if app.USE_EMAIL:\n title = notifyStrings[NOTIFY_LOGIN]\n to = self._generate_recipients(None)\n if not to:\n log.debug('Skipping email notify because there are no configured recipients')\n else:\n try:\n msg = MIMEMultipart('alternative')\n msg.attach(MIMEText(\n '<body style=\"font-family:Helvetica, Arial, sans-serif;\">'\n '<h3>Medusa Notification - Remote Login</h3><br>'\n '<p>New login from IP: <a href=\"http://geomaplookup.net/?ip={0}\">{0}</a>.<br><br>'\n '<footer style=\"margin-top: 2.5em; padding: .7em 0; '\n 'color: #777; border-top: #BBB solid 1px;\">'\n 'Powered by Medusa.</footer></body>'.format\n (ipaddress), 'html'))\n\n except Exception:\n try:\n msg = MIMEText(ipaddress)\n except Exception:\n msg = MIMEText(title)\n\n msg['Subject'] = '{0}: {1}'.format(title, ipaddress)\n msg['From'] = app.EMAIL_FROM\n msg['To'] = ','.join(to)\n msg['Date'] = formatdate(localtime=True)\n\n if self._sendmail(app.EMAIL_HOST, app.EMAIL_PORT, app.EMAIL_FROM, app.EMAIL_TLS,\n app.EMAIL_USER, app.EMAIL_PASSWORD, to, msg):\n log.debug('Login notification sent to {recipient}', {'recipient': to})\n else:\n log.warning('Login notification error: {0}', self.last_err)\n\n @staticmethod\n def _generate_recipients(show):\n addrs = []\n main_db_con = db.DBConnection()\n\n # Grab the global recipients\n if app.EMAIL_LIST:\n addrs.extend(\n addr for addr in app.EMAIL_LIST\n if addr.strip()\n )\n\n # Grab the per-show-notification recipients\n if show:\n sql_results = main_db_con.select(\n 'SELECT notify_list '\n 'FROM tv_shows '\n 'WHERE show_name = ?',\n [show]\n )\n for row in sql_results:\n notify_list = row['notify_list']\n if not notify_list:\n continue\n\n if notify_list[0] == '{':\n entries = dict(ast.literal_eval(notify_list))\n notify_list = entries['emails']\n\n addrs.extend(\n addr for addr in notify_list.split(',')\n if addr.strip()\n )\n\n addrs = set(addrs)\n log.debug('Notification recipients: {0}', addrs)\n return addrs\n\n def _sendmail(self, host, port, smtp_from, use_tls, user, pwd, to, msg, smtp_debug=False):\n log.debug(\n 'HOST: {host}; PORT: {port}; FROM: {sender}, TLS: {tls},'\n ' USER: {user}, PWD: {password}, TO: {recipient}', {\n 'host': host,\n 'port': port,\n 'sender': smtp_from,\n 'tls': 
use_tls,\n 'user': user,\n 'password': pwd,\n 'recipient': to,\n }\n )\n try:\n srv = smtplib.SMTP(host, int(port))\n except Exception as error:\n log.warning('Exception generated while sending e-mail: {0}', error)\n # logger.log(traceback.format_exc(), logger.DEBUG)\n self.last_err = '{0}'.format(error)\n return False\n\n if smtp_debug:\n srv.set_debuglevel(1)\n try:\n if use_tls in ('1', True) or (user and pwd):\n log.debug('Sending initial EHLO command!')\n srv.ehlo()\n if use_tls in ('1', True):\n log.debug('Sending STARTTLS command!')\n srv.starttls()\n srv.ehlo()\n if user and pwd:\n log.debug('Sending LOGIN command!')\n srv.login(user, pwd)\n\n srv.sendmail(smtp_from, to, msg.as_string())\n srv.quit()\n return True\n except Exception as error:\n self.last_err = '{0}'.format(error)\n return False\n\n @classmethod\n def _parse_name(cls, ep_name):\n # @TODO: Prone to issues, best solution is to have a dictionary passed to notifiers\n match = cls.name_pattern.match(ep_name)\n\n # Fallback\n if not match:\n # @TODO: This won't be needed when notifiers receive a dictionary\n log.warning('Unable to parse \"{0}\" for email notification', ep_name)\n titles = ep_name.split(' - ')\n return {\n 'show': titles[0],\n 'episode': ' - '.join(titles[1:])\n }\n\n result = match.groupdict()\n\n log.debug('Email notifier parsed \"{0}\" into {1!r}',\n ep_name, result)\n\n return result\n",
"path": "medusa/notifiers/emailnotify.py"
}
] | diff --git a/CHANGELOG.md b/CHANGELOG.md
index 5979f5fdf7..e3b4a08530 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -9,6 +9,7 @@
- Fixed extra scripts running with Python 3 ([#6428](https://github.com/pymedusa/Medusa/pull/6428))
- Fixed Torrenting provider exception when offline ([#6430](https://github.com/pymedusa/Medusa/pull/6430))
- Fixed snatching of air by date shows specials ([#6457](https://github.com/pymedusa/Medusa/pull/6457))
+- Fixed email notifier name parser warning for ABD episodes ([#6527](https://github.com/pymedusa/Medusa/pull/6527))
## 0.3.1 (2019-03-20)
diff --git a/medusa/notifiers/emailnotify.py b/medusa/notifiers/emailnotify.py
index c19180dc96..2ffc9bf208 100644
--- a/medusa/notifiers/emailnotify.py
+++ b/medusa/notifiers/emailnotify.py
@@ -42,7 +42,7 @@ class Notifier(object):
name_pattern = re.compile(
r'(?P<show>.+?) - '
- r'(?P<ep_id>S?\d+[Ex]\d+( - \d{3})?|\d{3}|\d{4}-\d{2}-\d{2}) - '
+ r'(?P<ep_id>S?\d+[Ex]\d+( - \d{3})?|\d{3}|\d{4} \d{2} \d{2}) - '
r'(?P<episode>.*)'
)
diff --git a/tests/notifiers/test_emailnotify.py b/tests/notifiers/test_emailnotify.py
index 79678baa0c..40b47237a8 100644
--- a/tests/notifiers/test_emailnotify.py
+++ b/tests/notifiers/test_emailnotify.py
@@ -99,26 +99,26 @@
}
},
{ # p11 - [%SN - %AD - %EN] - hypen in episode name
- 'ep_name': 'Archer - 2017-08-12 - Danger Island - Disheartening Situation',
+ 'ep_name': 'Archer - 2017 08 12 - Danger Island - Disheartening Situation',
'expected': {
'show': 'Archer',
- 'ep_id': '2017-08-12',
+ 'ep_id': '2017 08 12',
'episode': 'Danger Island - Disheartening Situation'
}
},
{ # p12 - [%SN - %AD - %EN]
- 'ep_name': 'Jersey Shore Family Vacation - 2018-04-20 - Meatball Down',
+ 'ep_name': 'Jersey Shore Family Vacation - 2018 04 20 - Meatball Down',
'expected': {
'show': 'Jersey Shore Family Vacation',
- 'ep_id': '2018-04-20',
+ 'ep_id': '2018 04 20',
'episode': 'Meatball Down'
}
},
{ # p13 - [%SN - %AD - %EN] - empty episode name
- 'ep_name': 'Jersey Shore Family Vacation - 2018-04-20 - ',
+ 'ep_name': 'Jersey Shore Family Vacation - 2018 04 20 - ',
'expected': {
'show': 'Jersey Shore Family Vacation',
- 'ep_id': '2018-04-20',
+ 'ep_id': '2018 04 20',
'episode': ''
}
},
|
Uberspace__lab-1000 | link to "view changelog" no longer works
"view changelog" on each page will lead to the old master branch which now should be `main`
| [
{
"content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n#\n# Uberspace 7 lab documentation build configuration file, created by\n# sphinx-quickstart on Tue Feb 13 12:19:29 2018.\n#\n# This file is execfile()d with the current directory set to its\n# containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\n# import os\n# import sys\n# sys.path.insert(0, os.path.abspath('.'))\n\nimport sphinx_rtd_theme\n\n# -- General configuration ------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#\n# needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n \"authorship\",\n \"sphinx.ext.extlinks\",\n \"notfound.extension\",\n \"sphinxcontrib.spelling\",\n]\n\n# Add spelling settings\ntokenizer_lang = \"en_GB\"\nspelling_word_list_filename = \"dict.txt\"\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n#\n# source_suffix = ['.rst', '.md']\nsource_suffix = \".rst\"\n\n# The master toctree document.\nmaster_doc = \"index\"\n\n# General information about the project.\nproject = \"UberLab\"\ncopyright = \"2018, uberspace.de\"\nauthor = \"uberspace.de\"\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\nrelease = version = \"7\"\n\n# The language for content autogenerated by Sphinx. 
Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = None\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This patterns also effect to html_static_path and html_extra_path\nexclude_patterns = []\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = \"sphinx\"\n\n# If true, `todo` and `todoList` produce output, else they produce nothing.\ntodo_include_todos = False\n\n# Configure the `extlinks` extension to handle the `manual` and `lab`\n# directives.\n# By setting an empty string as the second tuple element, the display text\n# is the same as the target by default.\nextlinks = {\n \"manual\": (\"https://manual.uberspace.de/%s.html\", \"\"),\n \"manual_anchor\": (\"https://manual.uberspace.de/%s\", \"\"),\n \"lab\": (\"%s.html\", \"\"),\n \"lab_anchor\": (\"%s\", \"\"),\n}\n\n\n# -- Options for HTML output ----------------------------------------------\n\nhtml_theme = \"sphinx_rtd_theme\"\nhtml_theme_options = {\n \"display_version\": False,\n \"navigation_depth\": 2,\n \"collapse_navigation\": True,\n}\nhtml_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\nhtml_last_updated_fmt = \"%b %d, %Y\"\nhtml_context = {\n \"css_files\": [\"_static/css/custom.css\"],\n \"display_github\": True,\n \"github_user\": \"Uberspace\",\n \"github_repo\": \"lab\",\n \"github_version\": \"master\",\n \"conf_py_path\": \"/source/\",\n}\nhtml_show_copyright = False\nhtml_favicon = \"_static/favicon.ico\"\n\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#\n# html_theme_options = {}\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = [\"_static\"]\nhtml_extra_path = [\"_redirects\"]\n\n# Custom sidebar templates, must be a dictionary that maps document names\n# to template names.\n#\n# This is required for the alabaster theme\n# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars\nhtml_sidebars = {\n \"**\": [\n \"relations.html\", # needs 'show_related': True theme option to display\n \"searchbox.html\",\n ]\n}\n\n# sphinx-notfound-page\n# https://github.com/rtfd/sphinx-notfound-page\nnotfound_no_urls_prefix = True\n\n# -- Options for HTMLHelp output ------------------------------------------\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = \"Uberspace7labdoc\"\n\n\nexclude_patterns = [\"404.rst\", \"includes/proxy-rewrite-static.rst\"]\n\n\ndef setup(app):\n app.add_js_file(\"js/custom.js\")\n",
"path": "source/conf.py"
}
] | [
{
"content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n#\n# Uberspace 7 lab documentation build configuration file, created by\n# sphinx-quickstart on Tue Feb 13 12:19:29 2018.\n#\n# This file is execfile()d with the current directory set to its\n# containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\n# import os\n# import sys\n# sys.path.insert(0, os.path.abspath('.'))\n\nimport sphinx_rtd_theme\n\n# -- General configuration ------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#\n# needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n \"authorship\",\n \"sphinx.ext.extlinks\",\n \"notfound.extension\",\n \"sphinxcontrib.spelling\",\n]\n\n# Add spelling settings\ntokenizer_lang = \"en_GB\"\nspelling_word_list_filename = \"dict.txt\"\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n#\n# source_suffix = ['.rst', '.md']\nsource_suffix = \".rst\"\n\n# The master toctree document.\nmaster_doc = \"index\"\n\n# General information about the project.\nproject = \"UberLab\"\ncopyright = \"2018, uberspace.de\"\nauthor = \"uberspace.de\"\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\nrelease = version = \"7\"\n\n# The language for content autogenerated by Sphinx. 
Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = None\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This patterns also effect to html_static_path and html_extra_path\nexclude_patterns = []\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = \"sphinx\"\n\n# If true, `todo` and `todoList` produce output, else they produce nothing.\ntodo_include_todos = False\n\n# Configure the `extlinks` extension to handle the `manual` and `lab`\n# directives.\n# By setting an empty string as the second tuple element, the display text\n# is the same as the target by default.\nextlinks = {\n \"manual\": (\"https://manual.uberspace.de/%s.html\", \"\"),\n \"manual_anchor\": (\"https://manual.uberspace.de/%s\", \"\"),\n \"lab\": (\"%s.html\", \"\"),\n \"lab_anchor\": (\"%s\", \"\"),\n}\n\n\n# -- Options for HTML output ----------------------------------------------\n\nhtml_theme = \"sphinx_rtd_theme\"\nhtml_theme_options = {\n \"display_version\": False,\n \"navigation_depth\": 2,\n \"collapse_navigation\": True,\n}\nhtml_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\nhtml_last_updated_fmt = \"%b %d, %Y\"\nhtml_context = {\n \"css_files\": [\"_static/css/custom.css\"],\n \"display_github\": True,\n \"github_user\": \"Uberspace\",\n \"github_repo\": \"lab\",\n \"github_version\": \"main\",\n \"conf_py_path\": \"/source/\",\n}\nhtml_show_copyright = False\nhtml_favicon = \"_static/favicon.ico\"\n\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#\n# html_theme_options = {}\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = [\"_static\"]\nhtml_extra_path = [\"_redirects\"]\n\n# Custom sidebar templates, must be a dictionary that maps document names\n# to template names.\n#\n# This is required for the alabaster theme\n# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars\nhtml_sidebars = {\n \"**\": [\n \"relations.html\", # needs 'show_related': True theme option to display\n \"searchbox.html\",\n ]\n}\n\n# sphinx-notfound-page\n# https://github.com/rtfd/sphinx-notfound-page\nnotfound_no_urls_prefix = True\n\n# -- Options for HTMLHelp output ------------------------------------------\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = \"Uberspace7labdoc\"\n\n\nexclude_patterns = [\"404.rst\", \"includes/proxy-rewrite-static.rst\"]\n\n\ndef setup(app):\n app.add_js_file(\"js/custom.js\")\n",
"path": "source/conf.py"
}
] | diff --git a/source/conf.py b/source/conf.py
index 987f5b37..6551eb99 100644
--- a/source/conf.py
+++ b/source/conf.py
@@ -112,7 +112,7 @@
"display_github": True,
"github_user": "Uberspace",
"github_repo": "lab",
- "github_version": "master",
+ "github_version": "main",
"conf_py_path": "/source/",
}
html_show_copyright = False
|
ivy-llc__ivy-27165 | Using PEP 585 annotations without a `from __future__ import annotations` import
In the following line:
https://github.com/unifyai/ivy/blob/ea0eaad440ff7f953c2fb4c621eccfe4a6fb2ecd/ivy/functional/backends/paddle/experimental/statistical.py#L422
It should be `Tuple[Any, ...]:` because we are not using the `from __future__ import annotations` import.
Using `tuple[Any, ...]` without the `from __future__ import annotations` import will cause runtime errors on Python versions prior to 3.9 (and PEP 604 `X | Y` unions likewise fail prior to 3.10).
So, it should be fixed.
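For illustration, a minimal sketch of the failure mode (standalone, not from the ivy codebase; `f` is a hypothetical stand-in):

```python
from typing import Any, Tuple

# On Python 3.8 the return annotation is evaluated at definition time, so a
# PEP 585 generic raises immediately:
#   def unravel_index(...) -> tuple[Any, ...]:
#       TypeError: 'type' object is not subscriptable

def f() -> Tuple[Any, ...]:  # typing.Tuple is safe on all supported versions
    return (1, 2)

# Alternative fix: defer evaluation of every annotation in the module
# (PEP 563, available since Python 3.7), which makes the lowercase form legal:
#   from __future__ import annotations
#   def g() -> tuple[Any, ...]: ...
```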
| [
{
"content": "# global\nfrom typing import Optional, Union, Tuple, Sequence, Any\nimport paddle\nimport ivy.functional.backends.paddle as paddle_backend\nimport ivy\nfrom copy import deepcopy\n\n# local\nfrom ivy.func_wrapper import (\n with_unsupported_device_and_dtypes,\n with_supported_dtypes,\n)\nfrom . import backend_version\n\n\n@with_supported_dtypes(\n {\"2.5.1 and below\": (\"complex\", \"float32\", \"float64\", \"int32\", \"int64\")},\n backend_version,\n)\ndef median(\n input: paddle.Tensor,\n /,\n *,\n axis: Optional[Union[Tuple[int], int]] = None,\n keepdims: Optional[bool] = False,\n out: Optional[paddle.Tensor] = None,\n) -> paddle.Tensor:\n if paddle.is_complex(input):\n ret = paddle.complex(\n paddle.median(input.real(), axis=axis, keepdim=True),\n paddle.median(input.imag(), axis=axis, keepdim=True),\n )\n else:\n ret = paddle.median(input, axis=axis, keepdim=True)\n # keepdims is set to True because in versions up to 2.5.1\n # there was a problem when the axis was defined, and it was the\n # only axis in the tensor, so it needs to be handled manually\n if not keepdims:\n ret = paddle_backend.squeeze(ret, axis=axis)\n # The following code is to simulate other frameworks\n # output shapes behaviour since min output dim is 1 in paddle\n if isinstance(axis, Sequence):\n if len(axis) == input.ndim:\n axis = None\n if (input.ndim == 1 or axis is None) and not keepdims:\n ret = ret.squeeze()\n return ret.astype(input.dtype)\n\n\n@with_supported_dtypes(\n {\"2.5.1 and below\": (\"complex\", \"float32\", \"float64\", \"int64\")}, backend_version\n)\ndef nanmean(\n a: paddle.Tensor,\n /,\n *,\n axis: Optional[Union[int, Tuple[int]]] = None,\n keepdims: Optional[bool] = False,\n dtype: Optional[paddle.dtype] = None,\n out: Optional[paddle.Tensor] = None,\n) -> paddle.Tensor:\n ret_dtype = dtype if dtype is not None else a.dtype\n a = a.cast(ret_dtype)\n if paddle.is_complex(a):\n ret = paddle.complex(\n paddle.nanmean(a.real(), axis=axis, keepdim=keepdims),\n paddle.nanmean(a.imag(), axis=axis, keepdim=keepdims),\n )\n else:\n ret = paddle.nanmean(a, axis=axis, keepdim=keepdims)\n\n # The following code is to simulate other frameworks\n # output shapes behavior since min output dim is 1 in paddle\n if isinstance(axis, Sequence):\n if len(axis) == a.ndim:\n axis = None\n if (a.ndim == 1 or axis is None) and not keepdims:\n ret = ret.squeeze()\n return ret.astype(ret_dtype)\n\n\ndef _infer_dtype(dtype: paddle.dtype):\n default_dtype = ivy.infer_default_dtype(dtype)\n if ivy.dtype_bits(dtype) < ivy.dtype_bits(default_dtype):\n return default_dtype\n return dtype\n\n\ndef _validate_quantile(q):\n if isinstance(q, float):\n q = paddle.to_tensor(q)\n if q.ndim == 1 and q.size < 10:\n for i in range(q.size):\n if not (0.0 <= q[i] <= 1.0):\n return False\n else:\n if not (paddle.all(0 <= q) and paddle.all(q <= 1)):\n return False\n return True\n\n\n@with_unsupported_device_and_dtypes(\n {\n \"2.5.1 and below\": {\n \"cpu\": (\n \"int8\",\n \"int16\",\n \"uint8\",\n \"float16\",\n \"bfloat16\",\n \"complex64\",\n \"complex128\",\n )\n }\n },\n backend_version,\n)\ndef nanmin(\n a: paddle.Tensor,\n /,\n *,\n axis: Optional[Union[int, Tuple[int]]] = None,\n keepdims: Optional[bool] = False,\n initial: Optional[Union[int, float, complex]] = None,\n where: Optional[paddle.Tensor] = None,\n out: Optional[paddle.Tensor] = None,\n) -> paddle.Tensor:\n nan_mask = paddle.isnan(a)\n if where is not None:\n nan_mask = paddle.logical_or(nan_mask, paddle.logical_not(where))\n a_copy = a.clone()\n a_copy = 
paddle.where(nan_mask, paddle.full_like(a_copy, float(\"inf\")), a_copy)\n if axis is None:\n result = paddle.min(a_copy, keepdim=keepdims)\n else:\n result = paddle.min(a_copy, axis=axis, keepdim=keepdims)\n if initial is not None:\n initial = paddle.to_tensor(initial, dtype=a.dtype)\n result = paddle.minimum(result, initial)\n return result\n\n\n@with_supported_dtypes({\"2.5.1 and below\": (\"float32\", \"float64\")}, backend_version)\ndef nanprod(\n a: paddle.Tensor,\n /,\n *,\n axis: Optional[Union[int, Tuple[int]]] = None,\n keepdims: Optional[bool] = False,\n dtype: Optional[paddle.dtype] = None,\n out: Optional[paddle.Tensor] = None,\n initial: Optional[Union[int, float, complex]] = None,\n where: Optional[paddle.Tensor] = None,\n) -> paddle.Tensor:\n dtype = ivy.as_native_dtype(dtype)\n if dtype is None:\n dtype = _infer_dtype(a.dtype)\n a = a.cast(dtype)\n if initial is None:\n initial = 1\n a = paddle.nan_to_num(a, nan=1.0)\n ret = paddle.prod(a, axis=axis, keepdim=keepdims) * initial\n\n if isinstance(axis, Sequence):\n if len(axis) == a.ndim:\n axis = None\n if (a.ndim == 1 or axis is None) and not keepdims:\n ret = ret.squeeze()\n return ret.cast(dtype)\n\n\ndef _to_positive_axis(axis, ndim):\n if not isinstance(axis, (list, tuple)):\n axis = [axis]\n\n if len(axis) == 0:\n raise ValueError(\"Axis can't be empty!\")\n\n if len(set(axis)) != len(axis):\n raise ValueError(\"Duplicated axis!\")\n\n for i in range(len(axis)):\n if not (isinstance(axis[i], int) and (ndim > axis[i] >= -ndim)):\n raise ValueError(\"Axis must be int in range [-rank(x), rank(x))\")\n if axis[i] < 0:\n axis[i] += ndim\n return axis\n\n\ndef _handle_axis(a, q, fn, keepdims=False, axis=None, interpolation=\"nearest\"):\n nd = a.ndim\n axis_arg = deepcopy(axis)\n if axis is not None:\n axis = _to_positive_axis(axis, nd)\n\n if len(axis) == 1:\n axis_arg = axis[0]\n else:\n keep = set(range(nd)) - set(axis)\n nkeep = len(keep)\n\n for i, s in enumerate(sorted(keep)):\n a = a.moveaxis(s, i)\n a = a.reshape(\n a.shape[:nkeep]\n + [\n -1,\n ]\n )\n axis_arg = -1\n\n ret = fn(a, q, axis=axis_arg, interpolation=interpolation)\n\n if keepdims:\n if axis is None:\n index_ret = (None,) * nd\n else:\n index_ret = tuple(None if i in axis else slice(None) for i in range(nd))\n ret = ret[(Ellipsis,) + index_ret]\n # if keepdims:\n # axis = axis if axis is not None else list(range(a.ndim))\n # ret = ret.unsqueeze(axis)\n return ret\n\n\ndef _quantile(a, q, axis=None, interpolation=\"nearest\"):\n if isinstance(q, float):\n q = paddle.to_tensor(q)\n ret_dtype = a.dtype\n if q.ndim > 1:\n raise ValueError(\"q argument must be a scalar or 1-dimensional!\")\n if axis is None:\n axis = 0\n a = paddle.flatten(a)\n elif axis != 0:\n a = a.moveaxis(axis, 0)\n axis = 0\n\n n = a.shape[axis]\n\n indices = q * (n - 1)\n\n a = paddle.sort(a, axis)\n\n if interpolation == \"lower\":\n indices = paddle.floor(indices)\n elif interpolation == \"higher\":\n indices = paddle.ceil(indices)\n elif interpolation == \"nearest\":\n indices = paddle.round(indices)\n elif interpolation == \"midpoint\":\n index_floor = paddle.floor(indices)\n index_ceil = paddle.ceil(indices)\n indices = (index_ceil + index_floor) / 2\n\n indices_below = paddle.floor(indices).astype(paddle.int32)\n indices_upper = paddle.ceil(indices).astype(paddle.int32)\n weights = indices - indices_below.astype(paddle.float64)\n if interpolation == \"nearest_jax\":\n indices_below = paddle.clip(indices_below, 0, n - 1)\n indices_upper = paddle.clip(indices_upper, 0, n - 1)\n 
tensor_upper = paddle.gather(a, indices_upper, axis=axis)\n tensor_below = paddle.gather(a, indices_below, axis=axis)\n\n pred = weights <= 0.5\n out = paddle.where(pred, tensor_below, tensor_upper)\n else:\n tensor_upper = paddle.gather(a, indices_upper, axis=axis)\n tensor_below = paddle.gather(a, indices_below, axis=axis)\n out = paddle.lerp(\n tensor_below.astype(paddle.float64),\n tensor_upper.astype(paddle.float64),\n weights.astype(paddle.float64),\n )\n\n return out.astype(ret_dtype)\n\n\ndef _compute_quantile_wrapper(\n x,\n q,\n axis=None,\n keepdims=False,\n interpolation=\"linear\",\n):\n if not _validate_quantile(q):\n raise ValueError(\"Quantiles must be in the range [0, 1]\")\n if interpolation not in [\n \"linear\",\n \"lower\",\n \"higher\",\n \"midpoint\",\n \"nearest\",\n \"nearest_jax\",\n ]:\n raise ValueError(\n \"Interpolation must be 'linear', 'lower', 'higher', 'midpoint' or 'nearest'\"\n )\n return _handle_axis(\n x,\n q,\n _quantile,\n keepdims=keepdims,\n axis=axis,\n interpolation=interpolation,\n )\n\n\n@with_unsupported_device_and_dtypes(\n {\n \"2.5.1 and below\": {\n \"cpu\": (\n \"int8\",\n \"int16\",\n \"uint8\",\n \"float16\",\n \"bfloat16\",\n \"complex64\",\n \"complex128\",\n )\n }\n },\n backend_version,\n)\ndef quantile(\n a: paddle.Tensor,\n q: Union[paddle.Tensor, float],\n /,\n *,\n axis: Optional[Union[Sequence[int], int]] = None,\n keepdims: Optional[bool] = False,\n interpolation: Optional[str] = \"linear\",\n out: Optional[paddle.Tensor] = None,\n) -> paddle.Tensor:\n # added the nearest_jax mode to enable jax-like calculations for method=\"nearest\"\n return _compute_quantile_wrapper(\n x=a,\n q=q,\n axis=axis,\n keepdims=keepdims,\n interpolation=interpolation,\n )\n\n\ndef corrcoef(\n x: paddle.Tensor,\n /,\n *,\n y: Optional[paddle.Tensor] = None,\n rowvar: Optional[bool] = True,\n name: Optional[str] = None,\n out: Optional[paddle.Tensor] = None,\n) -> paddle.Tensor:\n return paddle.linalg.corrcoef(\n x=x,\n rowvar=rowvar,\n name=name,\n )\n\n\ndef histogram(\n a: paddle.Tensor,\n /,\n *,\n bins: Optional[Union[int, paddle.Tensor]] = None,\n axis: Optional[int] = None,\n extend_lower_interval: Optional[bool] = False,\n extend_upper_interval: Optional[bool] = False,\n dtype: Optional[paddle.Tensor] = None,\n range: Optional[Tuple[float]] = None,\n weights: Optional[paddle.Tensor] = None,\n density: Optional[bool] = False,\n out: Optional[paddle.Tensor] = None,\n) -> Tuple[paddle.Tensor]:\n if range is None:\n min_range = 0\n max_range = 0\n else:\n min_range = range[0]\n max_range = range[1]\n return paddle.histogram(a, bins=bins, min=min_range, max=max_range)\n\n\n@with_supported_dtypes(\n {\"2.5.1 and below\": (\"float32\", \"float64\", \"int32\", \"int64\")}, backend_version\n)\ndef nanmedian(\n input: paddle.Tensor,\n /,\n *,\n axis: Optional[Union[Tuple[int], int]] = None,\n keepdims: Optional[bool] = False,\n dtype: Optional[paddle.dtype] = None,\n overwrite_input: Optional[bool] = False,\n out: Optional[paddle.Tensor] = None,\n) -> paddle.Tensor:\n if dtype is None:\n dtype = input.dtype\n return paddle.nanmedian(x=input, axis=axis, keepdim=keepdims)\n\n\n@with_unsupported_device_and_dtypes(\n {\n \"2.5.1 and below\": {\n \"cpu\": (\n \"int8\",\n \"int16\",\n \"uint8\",\n \"float16\",\n \"bool\",\n )\n }\n },\n backend_version,\n)\ndef unravel_index(\n indices: paddle.Tensor,\n shape: Tuple[int],\n /,\n *,\n out: Optional[paddle.Tensor] = None,\n) -> tuple[Any, ...]:\n if indices.ndim == 0:\n indices = indices.unsqueeze(0)\n coord 
= []\n indices = indices\n for dim in reversed(shape):\n coord.append((indices % dim).astype(\"int32\"))\n indices = paddle.floor(indices / dim)\n\n return tuple(reversed(coord))\n\n\n@with_unsupported_device_and_dtypes(\n {\n \"2.5.1 and below\": {\n \"cpu\": (\n \"int8\",\n \"int16\",\n \"uint8\",\n \"float16\",\n \"float32\",\n \"float64\",\n \"complex64\",\n \"complex128\",\n \"bool\",\n )\n }\n },\n backend_version,\n)\ndef bincount(\n x: paddle.Tensor,\n /,\n *,\n weights: Optional[paddle.Tensor] = None,\n minlength: int = 0,\n out: Optional[paddle.Tensor] = None,\n) -> paddle.Tensor:\n return paddle.bincount(x, weights=weights, minlength=minlength).cast(\n x.dtype if weights is None else weights.dtype\n )\n\n\ndef igamma(\n a: paddle.Tensor,\n /,\n *,\n x: paddle.Tensor,\n out: Optional[paddle.Tensor] = None,\n) -> paddle.Tensor:\n results = []\n ret_dtype = a.dtype if out is None else out.dtype\n if paddle.float16 in [a.dtype, x.dtype]:\n a = a.astype(\"float32\")\n x = x.astype(\"float32\")\n\n for ai, xi in zip(a.flatten(), x.flatten()):\n ai = ai.astype(\"float64\")\n xi = xi.astype(\"float64\")\n\n def integrand(t):\n return paddle.exp(-t) * paddle.pow(t, ai - 1)\n\n intervals = paddle.linspace(0, xi, 10001).astype(\"float64\")\n interval_width = xi / 10000\n values = integrand(intervals)\n integral = paddle.multiply((values[:-1] + values[1:]) / 2, interval_width)\n result = paddle.divide(paddle.sum(integral), paddle.exp(paddle.lgamma(ai)))\n results.append(result)\n\n return paddle.to_tensor(results, dtype=ret_dtype).reshape(a.shape)\n\n\ndef cov(\n x1: paddle.Tensor,\n x2: paddle.Tensor = None,\n /,\n *,\n rowVar: bool = True,\n bias: bool = False,\n ddof: Optional[int] = None,\n fweights: Optional[paddle.Tensor] = None,\n aweights: Optional[paddle.Tensor] = None,\n dtype: Optional[paddle.dtype] = None,\n) -> paddle.Tensor:\n if fweights is not None:\n fweights = fweights.astype(\"float64\")\n\n if aweights is not None:\n aweights = aweights.astype(\"float64\")\n\n if ddof is not None and ddof != int(ddof):\n raise ValueError(\"ddof must be an integer\")\n\n if len(x1.shape) > 2:\n raise ValueError(\"x1 has more than 2 dimensions\")\n\n if x2 is not None:\n if len(x2.shape) > 2:\n raise ValueError(\"x2 has more than 2 dimensions\")\n\n if ddof is None:\n if bias == 0:\n ddof = 1\n else:\n ddof = 0\n\n if dtype is None:\n x1 = x1.astype(\"float64\")\n if x2 is not None:\n x2 = x2.astype(\"float64\")\n else:\n x1 = x1.astype(dtype)\n if x2 is not None:\n x2 = x2.astype(dtype)\n\n X = x1\n if not rowVar and X.shape[0] != 1:\n X = paddle.transpose(X, perm=tuple(range(len(X.shape) - 1, -1, -1)))\n\n if x2 is not None:\n if not rowVar and x2.shape[0] != 1:\n x2 = paddle.transpose(x2, perm=tuple(range(len(x2.shape) - 1, -1, -1)))\n if len(x2.shape) > 1:\n X = paddle.concat([X, x2], axis=0)\n else:\n X = paddle.stack([X, x2], axis=0)\n\n if not rowVar:\n X = paddle.transpose(X, perm=tuple(range(len(X.shape) - 1, -1, -1)))\n\n return paddle.linalg.cov(\n X, rowvar=rowVar, ddof=ddof, fweights=fweights, aweights=aweights\n )\n\n\n@with_supported_dtypes(\n {\"2.5.1 and below\": (\"complex\", \"bool\", \"float32\", \"float64\")},\n backend_version,\n)\ndef cummax(\n x: paddle.Tensor,\n /,\n *,\n axis: int = 0,\n exclusive: bool = False,\n reverse: bool = False,\n dtype: Optional[paddle.dtype] = None,\n out: Optional[paddle.Tensor] = None,\n) -> Tuple[paddle.Tensor, paddle.Tensor]:\n if x.dtype in (paddle.complex128, paddle.complex64):\n x = x.real()\n\n if not (exclusive or reverse):\n 
return __find_cummax(x, axis=axis)\n\n elif exclusive and reverse:\n x, indices = __find_cummax(ivy.flip(x, axis=(axis,)), axis=axis)\n x, indices = ivy.swapaxes(x, axis, -1), ivy.swapaxes(indices, axis, -1)\n x = ivy.concat((ivy.zeros_like(x[..., -1:]), x[..., :-1]), axis=-1)\n indices = ivy.concat(\n (ivy.zeros_like(indices[..., -1:]), indices[..., :-1]), axis=-1\n )\n x, indices = ivy.swapaxes(x, axis, -1), ivy.swapaxes(indices, axis, -1)\n return ivy.flip(x, axis=(axis,)), ivy.flip(indices, axis=(axis,))\n\n elif exclusive:\n x = ivy.swapaxes(x, axis, -1)\n x = ivy.concat((ivy.zeros_like(x[..., -1:]), x[..., :-1]), axis=-1)\n x = ivy.swapaxes(x, axis, -1)\n x, indices = __find_cummax(x, axis=axis)\n\n return x, indices\n\n else:\n x, indices = __find_cummax(ivy.flip(x, axis=(axis,)), axis=axis)\n return ivy.flip(x, axis=axis), ivy.flip(indices, axis=axis)\n\n\ndef __find_cummax(\n x: paddle.Tensor, axis: int = 0, dtype: Optional[paddle.dtype] = None\n) -> Tuple[paddle.Tensor, paddle.Tensor]:\n indices = []\n values = []\n x_dtype = x.dtype if dtype is None else dtype\n if (\n isinstance(x.tolist()[0], list)\n and len(x[0].shape) >= 1\n and (isinstance(x[0], paddle.Tensor) or isinstance(x[0], ivy.Array))\n ):\n if axis >= 1:\n if not isinstance(x, list):\n x = x.tolist()\n for ret1 in x:\n value, indice = __find_cummax(\n paddle.to_tensor(ret1, dtype=x_dtype), axis=axis - 1, dtype=x_dtype\n )\n indices.append(indice)\n values.append(value)\n else:\n x_list = x.numpy()\n z_list = __get_index(x_list.tolist())\n indices, values, n1 = x_list.copy(), x_list.copy(), {}\n indices.fill(0)\n values.fill(0)\n z_list = sorted(z_list, key=lambda i: i[1])\n for y, y_index in z_list:\n multi_index = y_index\n if tuple(multi_index[1:]) not in n1:\n n1[tuple(multi_index[1:])] = multi_index[0]\n indices[y_index] = multi_index[0]\n values[y_index] = y\n elif (\n y\n >= x_list[\n tuple([n1[tuple(multi_index[1:])]] + list(multi_index[1:]))\n ]\n ):\n n1[tuple(multi_index[1:])] = multi_index[0]\n indices[y_index] = multi_index[0]\n values[y_index] = y\n else:\n indices[y_index] = n1[tuple(multi_index[1:])]\n values[y_index] = x_list[\n tuple([n1[tuple(multi_index[1:])]] + list(multi_index[1:]))\n ]\n else:\n if not isinstance(x, list):\n x = x.tolist()\n n = 0\n for idx, y in enumerate(x):\n if x[n] > y:\n values.append(x[n])\n elif x[n] <= y or idx == 0:\n n = idx\n values.append(y)\n indices.append(n)\n\n if isinstance(x, paddle.Tensor):\n return paddle.to_tensor(values, dtype=x.dtype), paddle.to_tensor(\n indices, dtype=\"int64\"\n )\n else:\n return ivy.array(values, dtype=x_dtype), ivy.array(indices, dtype=\"int64\")\n\n\ndef __get_index(lst, indices=None, prefix=None):\n if indices is None:\n indices = []\n if prefix is None:\n prefix = []\n\n if isinstance(lst, list):\n for i, sub_lst in enumerate(lst):\n sub_indices = prefix + [i]\n __get_index(sub_lst, indices, sub_indices)\n else:\n indices.append((lst, tuple(prefix)))\n return indices\n\n\n@with_unsupported_device_and_dtypes(\n {\"2.5.1 and below\": {\"cpu\": (\"uint8\", \"int8\", \"int16\")}},\n backend_version,\n)\ndef cummin(\n x: paddle.Tensor,\n /,\n *,\n axis: int = 0,\n exclusive: bool = False,\n reverse: bool = False,\n dtype: Optional[paddle.dtype] = None,\n out: Optional[paddle.Tensor] = None,\n) -> paddle.Tensor:\n dtype = dtype if dtype is not None else x.dtype\n if reverse:\n x = paddle.flip(x, axis=[axis])\n x_unstacked = paddle.unbind(x, axis=axis)\n cummin_x_unstacked = []\n cummin_x_unstacked.append(x_unstacked[0])\n for i, x_sub 
in enumerate(x_unstacked[1:]):\n cummin_x_sub = paddle.minimum(cummin_x_unstacked[i], x_sub)\n cummin_x_unstacked.append(cummin_x_sub)\n cummin_x = paddle.stack(cummin_x_unstacked, axis=axis)\n if reverse:\n cummin_x = paddle.flip(cummin_x, axis=[axis])\n return cummin_x.cast(dtype)\n",
"path": "ivy/functional/backends/paddle/experimental/statistical.py"
}
] | [
{
"content": "# global\nfrom typing import Optional, Union, Tuple, Sequence, Any\nimport paddle\nimport ivy.functional.backends.paddle as paddle_backend\nimport ivy\nfrom copy import deepcopy\n\n# local\nfrom ivy.func_wrapper import (\n with_unsupported_device_and_dtypes,\n with_supported_dtypes,\n)\nfrom . import backend_version\n\n\n@with_supported_dtypes(\n {\"2.5.1 and below\": (\"complex\", \"float32\", \"float64\", \"int32\", \"int64\")},\n backend_version,\n)\ndef median(\n input: paddle.Tensor,\n /,\n *,\n axis: Optional[Union[Tuple[int], int]] = None,\n keepdims: Optional[bool] = False,\n out: Optional[paddle.Tensor] = None,\n) -> paddle.Tensor:\n if paddle.is_complex(input):\n ret = paddle.complex(\n paddle.median(input.real(), axis=axis, keepdim=True),\n paddle.median(input.imag(), axis=axis, keepdim=True),\n )\n else:\n ret = paddle.median(input, axis=axis, keepdim=True)\n # keepdims is set to True because in versions up to 2.5.1\n # there was a problem when the axis was defined, and it was the\n # only axis in the tensor, so it needs to be handled manually\n if not keepdims:\n ret = paddle_backend.squeeze(ret, axis=axis)\n # The following code is to simulate other frameworks\n # output shapes behaviour since min output dim is 1 in paddle\n if isinstance(axis, Sequence):\n if len(axis) == input.ndim:\n axis = None\n if (input.ndim == 1 or axis is None) and not keepdims:\n ret = ret.squeeze()\n return ret.astype(input.dtype)\n\n\n@with_supported_dtypes(\n {\"2.5.1 and below\": (\"complex\", \"float32\", \"float64\", \"int64\")}, backend_version\n)\ndef nanmean(\n a: paddle.Tensor,\n /,\n *,\n axis: Optional[Union[int, Tuple[int]]] = None,\n keepdims: Optional[bool] = False,\n dtype: Optional[paddle.dtype] = None,\n out: Optional[paddle.Tensor] = None,\n) -> paddle.Tensor:\n ret_dtype = dtype if dtype is not None else a.dtype\n a = a.cast(ret_dtype)\n if paddle.is_complex(a):\n ret = paddle.complex(\n paddle.nanmean(a.real(), axis=axis, keepdim=keepdims),\n paddle.nanmean(a.imag(), axis=axis, keepdim=keepdims),\n )\n else:\n ret = paddle.nanmean(a, axis=axis, keepdim=keepdims)\n\n # The following code is to simulate other frameworks\n # output shapes behavior since min output dim is 1 in paddle\n if isinstance(axis, Sequence):\n if len(axis) == a.ndim:\n axis = None\n if (a.ndim == 1 or axis is None) and not keepdims:\n ret = ret.squeeze()\n return ret.astype(ret_dtype)\n\n\ndef _infer_dtype(dtype: paddle.dtype):\n default_dtype = ivy.infer_default_dtype(dtype)\n if ivy.dtype_bits(dtype) < ivy.dtype_bits(default_dtype):\n return default_dtype\n return dtype\n\n\ndef _validate_quantile(q):\n if isinstance(q, float):\n q = paddle.to_tensor(q)\n if q.ndim == 1 and q.size < 10:\n for i in range(q.size):\n if not (0.0 <= q[i] <= 1.0):\n return False\n else:\n if not (paddle.all(0 <= q) and paddle.all(q <= 1)):\n return False\n return True\n\n\n@with_unsupported_device_and_dtypes(\n {\n \"2.5.1 and below\": {\n \"cpu\": (\n \"int8\",\n \"int16\",\n \"uint8\",\n \"float16\",\n \"bfloat16\",\n \"complex64\",\n \"complex128\",\n )\n }\n },\n backend_version,\n)\ndef nanmin(\n a: paddle.Tensor,\n /,\n *,\n axis: Optional[Union[int, Tuple[int]]] = None,\n keepdims: Optional[bool] = False,\n initial: Optional[Union[int, float, complex]] = None,\n where: Optional[paddle.Tensor] = None,\n out: Optional[paddle.Tensor] = None,\n) -> paddle.Tensor:\n nan_mask = paddle.isnan(a)\n if where is not None:\n nan_mask = paddle.logical_or(nan_mask, paddle.logical_not(where))\n a_copy = a.clone()\n a_copy = 
paddle.where(nan_mask, paddle.full_like(a_copy, float(\"inf\")), a_copy)\n if axis is None:\n result = paddle.min(a_copy, keepdim=keepdims)\n else:\n result = paddle.min(a_copy, axis=axis, keepdim=keepdims)\n if initial is not None:\n initial = paddle.to_tensor(initial, dtype=a.dtype)\n result = paddle.minimum(result, initial)\n return result\n\n\n@with_supported_dtypes({\"2.5.1 and below\": (\"float32\", \"float64\")}, backend_version)\ndef nanprod(\n a: paddle.Tensor,\n /,\n *,\n axis: Optional[Union[int, Tuple[int]]] = None,\n keepdims: Optional[bool] = False,\n dtype: Optional[paddle.dtype] = None,\n out: Optional[paddle.Tensor] = None,\n initial: Optional[Union[int, float, complex]] = None,\n where: Optional[paddle.Tensor] = None,\n) -> paddle.Tensor:\n dtype = ivy.as_native_dtype(dtype)\n if dtype is None:\n dtype = _infer_dtype(a.dtype)\n a = a.cast(dtype)\n if initial is None:\n initial = 1\n a = paddle.nan_to_num(a, nan=1.0)\n ret = paddle.prod(a, axis=axis, keepdim=keepdims) * initial\n\n if isinstance(axis, Sequence):\n if len(axis) == a.ndim:\n axis = None\n if (a.ndim == 1 or axis is None) and not keepdims:\n ret = ret.squeeze()\n return ret.cast(dtype)\n\n\ndef _to_positive_axis(axis, ndim):\n if not isinstance(axis, (list, tuple)):\n axis = [axis]\n\n if len(axis) == 0:\n raise ValueError(\"Axis can't be empty!\")\n\n if len(set(axis)) != len(axis):\n raise ValueError(\"Duplicated axis!\")\n\n for i in range(len(axis)):\n if not (isinstance(axis[i], int) and (ndim > axis[i] >= -ndim)):\n raise ValueError(\"Axis must be int in range [-rank(x), rank(x))\")\n if axis[i] < 0:\n axis[i] += ndim\n return axis\n\n\ndef _handle_axis(a, q, fn, keepdims=False, axis=None, interpolation=\"nearest\"):\n nd = a.ndim\n axis_arg = deepcopy(axis)\n if axis is not None:\n axis = _to_positive_axis(axis, nd)\n\n if len(axis) == 1:\n axis_arg = axis[0]\n else:\n keep = set(range(nd)) - set(axis)\n nkeep = len(keep)\n\n for i, s in enumerate(sorted(keep)):\n a = a.moveaxis(s, i)\n a = a.reshape(\n a.shape[:nkeep]\n + [\n -1,\n ]\n )\n axis_arg = -1\n\n ret = fn(a, q, axis=axis_arg, interpolation=interpolation)\n\n if keepdims:\n if axis is None:\n index_ret = (None,) * nd\n else:\n index_ret = tuple(None if i in axis else slice(None) for i in range(nd))\n ret = ret[(Ellipsis,) + index_ret]\n # if keepdims:\n # axis = axis if axis is not None else list(range(a.ndim))\n # ret = ret.unsqueeze(axis)\n return ret\n\n\ndef _quantile(a, q, axis=None, interpolation=\"nearest\"):\n if isinstance(q, float):\n q = paddle.to_tensor(q)\n ret_dtype = a.dtype\n if q.ndim > 1:\n raise ValueError(\"q argument must be a scalar or 1-dimensional!\")\n if axis is None:\n axis = 0\n a = paddle.flatten(a)\n elif axis != 0:\n a = a.moveaxis(axis, 0)\n axis = 0\n\n n = a.shape[axis]\n\n indices = q * (n - 1)\n\n a = paddle.sort(a, axis)\n\n if interpolation == \"lower\":\n indices = paddle.floor(indices)\n elif interpolation == \"higher\":\n indices = paddle.ceil(indices)\n elif interpolation == \"nearest\":\n indices = paddle.round(indices)\n elif interpolation == \"midpoint\":\n index_floor = paddle.floor(indices)\n index_ceil = paddle.ceil(indices)\n indices = (index_ceil + index_floor) / 2\n\n indices_below = paddle.floor(indices).astype(paddle.int32)\n indices_upper = paddle.ceil(indices).astype(paddle.int32)\n weights = indices - indices_below.astype(paddle.float64)\n if interpolation == \"nearest_jax\":\n indices_below = paddle.clip(indices_below, 0, n - 1)\n indices_upper = paddle.clip(indices_upper, 0, n - 1)\n 
tensor_upper = paddle.gather(a, indices_upper, axis=axis)\n tensor_below = paddle.gather(a, indices_below, axis=axis)\n\n pred = weights <= 0.5\n out = paddle.where(pred, tensor_below, tensor_upper)\n else:\n tensor_upper = paddle.gather(a, indices_upper, axis=axis)\n tensor_below = paddle.gather(a, indices_below, axis=axis)\n out = paddle.lerp(\n tensor_below.astype(paddle.float64),\n tensor_upper.astype(paddle.float64),\n weights.astype(paddle.float64),\n )\n\n return out.astype(ret_dtype)\n\n\ndef _compute_quantile_wrapper(\n x,\n q,\n axis=None,\n keepdims=False,\n interpolation=\"linear\",\n):\n if not _validate_quantile(q):\n raise ValueError(\"Quantiles must be in the range [0, 1]\")\n if interpolation not in [\n \"linear\",\n \"lower\",\n \"higher\",\n \"midpoint\",\n \"nearest\",\n \"nearest_jax\",\n ]:\n raise ValueError(\n \"Interpolation must be 'linear', 'lower', 'higher', 'midpoint' or 'nearest'\"\n )\n return _handle_axis(\n x,\n q,\n _quantile,\n keepdims=keepdims,\n axis=axis,\n interpolation=interpolation,\n )\n\n\n@with_unsupported_device_and_dtypes(\n {\n \"2.5.1 and below\": {\n \"cpu\": (\n \"int8\",\n \"int16\",\n \"uint8\",\n \"float16\",\n \"bfloat16\",\n \"complex64\",\n \"complex128\",\n )\n }\n },\n backend_version,\n)\ndef quantile(\n a: paddle.Tensor,\n q: Union[paddle.Tensor, float],\n /,\n *,\n axis: Optional[Union[Sequence[int], int]] = None,\n keepdims: Optional[bool] = False,\n interpolation: Optional[str] = \"linear\",\n out: Optional[paddle.Tensor] = None,\n) -> paddle.Tensor:\n # added the nearest_jax mode to enable jax-like calculations for method=\"nearest\"\n return _compute_quantile_wrapper(\n x=a,\n q=q,\n axis=axis,\n keepdims=keepdims,\n interpolation=interpolation,\n )\n\n\ndef corrcoef(\n x: paddle.Tensor,\n /,\n *,\n y: Optional[paddle.Tensor] = None,\n rowvar: Optional[bool] = True,\n name: Optional[str] = None,\n out: Optional[paddle.Tensor] = None,\n) -> paddle.Tensor:\n return paddle.linalg.corrcoef(\n x=x,\n rowvar=rowvar,\n name=name,\n )\n\n\ndef histogram(\n a: paddle.Tensor,\n /,\n *,\n bins: Optional[Union[int, paddle.Tensor]] = None,\n axis: Optional[int] = None,\n extend_lower_interval: Optional[bool] = False,\n extend_upper_interval: Optional[bool] = False,\n dtype: Optional[paddle.Tensor] = None,\n range: Optional[Tuple[float]] = None,\n weights: Optional[paddle.Tensor] = None,\n density: Optional[bool] = False,\n out: Optional[paddle.Tensor] = None,\n) -> Tuple[paddle.Tensor]:\n if range is None:\n min_range = 0\n max_range = 0\n else:\n min_range = range[0]\n max_range = range[1]\n return paddle.histogram(a, bins=bins, min=min_range, max=max_range)\n\n\n@with_supported_dtypes(\n {\"2.5.1 and below\": (\"float32\", \"float64\", \"int32\", \"int64\")}, backend_version\n)\ndef nanmedian(\n input: paddle.Tensor,\n /,\n *,\n axis: Optional[Union[Tuple[int], int]] = None,\n keepdims: Optional[bool] = False,\n dtype: Optional[paddle.dtype] = None,\n overwrite_input: Optional[bool] = False,\n out: Optional[paddle.Tensor] = None,\n) -> paddle.Tensor:\n if dtype is None:\n dtype = input.dtype\n return paddle.nanmedian(x=input, axis=axis, keepdim=keepdims)\n\n\n@with_unsupported_device_and_dtypes(\n {\n \"2.5.1 and below\": {\n \"cpu\": (\n \"int8\",\n \"int16\",\n \"uint8\",\n \"float16\",\n \"bool\",\n )\n }\n },\n backend_version,\n)\ndef unravel_index(\n indices: paddle.Tensor,\n shape: Tuple[int],\n /,\n *,\n out: Optional[paddle.Tensor] = None,\n) -> Tuple[Any, ...]:\n if indices.ndim == 0:\n indices = indices.unsqueeze(0)\n coord 
= []\n indices = indices\n for dim in reversed(shape):\n coord.append((indices % dim).astype(\"int32\"))\n indices = paddle.floor(indices / dim)\n\n return tuple(reversed(coord))\n\n\n@with_unsupported_device_and_dtypes(\n {\n \"2.5.1 and below\": {\n \"cpu\": (\n \"int8\",\n \"int16\",\n \"uint8\",\n \"float16\",\n \"float32\",\n \"float64\",\n \"complex64\",\n \"complex128\",\n \"bool\",\n )\n }\n },\n backend_version,\n)\ndef bincount(\n x: paddle.Tensor,\n /,\n *,\n weights: Optional[paddle.Tensor] = None,\n minlength: int = 0,\n out: Optional[paddle.Tensor] = None,\n) -> paddle.Tensor:\n return paddle.bincount(x, weights=weights, minlength=minlength).cast(\n x.dtype if weights is None else weights.dtype\n )\n\n\ndef igamma(\n a: paddle.Tensor,\n /,\n *,\n x: paddle.Tensor,\n out: Optional[paddle.Tensor] = None,\n) -> paddle.Tensor:\n results = []\n ret_dtype = a.dtype if out is None else out.dtype\n if paddle.float16 in [a.dtype, x.dtype]:\n a = a.astype(\"float32\")\n x = x.astype(\"float32\")\n\n for ai, xi in zip(a.flatten(), x.flatten()):\n ai = ai.astype(\"float64\")\n xi = xi.astype(\"float64\")\n\n def integrand(t):\n return paddle.exp(-t) * paddle.pow(t, ai - 1)\n\n intervals = paddle.linspace(0, xi, 10001).astype(\"float64\")\n interval_width = xi / 10000\n values = integrand(intervals)\n integral = paddle.multiply((values[:-1] + values[1:]) / 2, interval_width)\n result = paddle.divide(paddle.sum(integral), paddle.exp(paddle.lgamma(ai)))\n results.append(result)\n\n return paddle.to_tensor(results, dtype=ret_dtype).reshape(a.shape)\n\n\ndef cov(\n x1: paddle.Tensor,\n x2: paddle.Tensor = None,\n /,\n *,\n rowVar: bool = True,\n bias: bool = False,\n ddof: Optional[int] = None,\n fweights: Optional[paddle.Tensor] = None,\n aweights: Optional[paddle.Tensor] = None,\n dtype: Optional[paddle.dtype] = None,\n) -> paddle.Tensor:\n if fweights is not None:\n fweights = fweights.astype(\"float64\")\n\n if aweights is not None:\n aweights = aweights.astype(\"float64\")\n\n if ddof is not None and ddof != int(ddof):\n raise ValueError(\"ddof must be an integer\")\n\n if len(x1.shape) > 2:\n raise ValueError(\"x1 has more than 2 dimensions\")\n\n if x2 is not None:\n if len(x2.shape) > 2:\n raise ValueError(\"x2 has more than 2 dimensions\")\n\n if ddof is None:\n if bias == 0:\n ddof = 1\n else:\n ddof = 0\n\n if dtype is None:\n x1 = x1.astype(\"float64\")\n if x2 is not None:\n x2 = x2.astype(\"float64\")\n else:\n x1 = x1.astype(dtype)\n if x2 is not None:\n x2 = x2.astype(dtype)\n\n X = x1\n if not rowVar and X.shape[0] != 1:\n X = paddle.transpose(X, perm=tuple(range(len(X.shape) - 1, -1, -1)))\n\n if x2 is not None:\n if not rowVar and x2.shape[0] != 1:\n x2 = paddle.transpose(x2, perm=tuple(range(len(x2.shape) - 1, -1, -1)))\n if len(x2.shape) > 1:\n X = paddle.concat([X, x2], axis=0)\n else:\n X = paddle.stack([X, x2], axis=0)\n\n if not rowVar:\n X = paddle.transpose(X, perm=tuple(range(len(X.shape) - 1, -1, -1)))\n\n return paddle.linalg.cov(\n X, rowvar=rowVar, ddof=ddof, fweights=fweights, aweights=aweights\n )\n\n\n@with_supported_dtypes(\n {\"2.5.1 and below\": (\"complex\", \"bool\", \"float32\", \"float64\")},\n backend_version,\n)\ndef cummax(\n x: paddle.Tensor,\n /,\n *,\n axis: int = 0,\n exclusive: bool = False,\n reverse: bool = False,\n dtype: Optional[paddle.dtype] = None,\n out: Optional[paddle.Tensor] = None,\n) -> Tuple[paddle.Tensor, paddle.Tensor]:\n if x.dtype in (paddle.complex128, paddle.complex64):\n x = x.real()\n\n if not (exclusive or reverse):\n 
return __find_cummax(x, axis=axis)\n\n elif exclusive and reverse:\n x, indices = __find_cummax(ivy.flip(x, axis=(axis,)), axis=axis)\n x, indices = ivy.swapaxes(x, axis, -1), ivy.swapaxes(indices, axis, -1)\n x = ivy.concat((ivy.zeros_like(x[..., -1:]), x[..., :-1]), axis=-1)\n indices = ivy.concat(\n (ivy.zeros_like(indices[..., -1:]), indices[..., :-1]), axis=-1\n )\n x, indices = ivy.swapaxes(x, axis, -1), ivy.swapaxes(indices, axis, -1)\n return ivy.flip(x, axis=(axis,)), ivy.flip(indices, axis=(axis,))\n\n elif exclusive:\n x = ivy.swapaxes(x, axis, -1)\n x = ivy.concat((ivy.zeros_like(x[..., -1:]), x[..., :-1]), axis=-1)\n x = ivy.swapaxes(x, axis, -1)\n x, indices = __find_cummax(x, axis=axis)\n\n return x, indices\n\n else:\n x, indices = __find_cummax(ivy.flip(x, axis=(axis,)), axis=axis)\n return ivy.flip(x, axis=axis), ivy.flip(indices, axis=axis)\n\n\ndef __find_cummax(\n x: paddle.Tensor, axis: int = 0, dtype: Optional[paddle.dtype] = None\n) -> Tuple[paddle.Tensor, paddle.Tensor]:\n indices = []\n values = []\n x_dtype = x.dtype if dtype is None else dtype\n if (\n isinstance(x.tolist()[0], list)\n and len(x[0].shape) >= 1\n and (isinstance(x[0], paddle.Tensor) or isinstance(x[0], ivy.Array))\n ):\n if axis >= 1:\n if not isinstance(x, list):\n x = x.tolist()\n for ret1 in x:\n value, indice = __find_cummax(\n paddle.to_tensor(ret1, dtype=x_dtype), axis=axis - 1, dtype=x_dtype\n )\n indices.append(indice)\n values.append(value)\n else:\n x_list = x.numpy()\n z_list = __get_index(x_list.tolist())\n indices, values, n1 = x_list.copy(), x_list.copy(), {}\n indices.fill(0)\n values.fill(0)\n z_list = sorted(z_list, key=lambda i: i[1])\n for y, y_index in z_list:\n multi_index = y_index\n if tuple(multi_index[1:]) not in n1:\n n1[tuple(multi_index[1:])] = multi_index[0]\n indices[y_index] = multi_index[0]\n values[y_index] = y\n elif (\n y\n >= x_list[\n tuple([n1[tuple(multi_index[1:])]] + list(multi_index[1:]))\n ]\n ):\n n1[tuple(multi_index[1:])] = multi_index[0]\n indices[y_index] = multi_index[0]\n values[y_index] = y\n else:\n indices[y_index] = n1[tuple(multi_index[1:])]\n values[y_index] = x_list[\n tuple([n1[tuple(multi_index[1:])]] + list(multi_index[1:]))\n ]\n else:\n if not isinstance(x, list):\n x = x.tolist()\n n = 0\n for idx, y in enumerate(x):\n if x[n] > y:\n values.append(x[n])\n elif x[n] <= y or idx == 0:\n n = idx\n values.append(y)\n indices.append(n)\n\n if isinstance(x, paddle.Tensor):\n return paddle.to_tensor(values, dtype=x.dtype), paddle.to_tensor(\n indices, dtype=\"int64\"\n )\n else:\n return ivy.array(values, dtype=x_dtype), ivy.array(indices, dtype=\"int64\")\n\n\ndef __get_index(lst, indices=None, prefix=None):\n if indices is None:\n indices = []\n if prefix is None:\n prefix = []\n\n if isinstance(lst, list):\n for i, sub_lst in enumerate(lst):\n sub_indices = prefix + [i]\n __get_index(sub_lst, indices, sub_indices)\n else:\n indices.append((lst, tuple(prefix)))\n return indices\n\n\n@with_unsupported_device_and_dtypes(\n {\"2.5.1 and below\": {\"cpu\": (\"uint8\", \"int8\", \"int16\")}},\n backend_version,\n)\ndef cummin(\n x: paddle.Tensor,\n /,\n *,\n axis: int = 0,\n exclusive: bool = False,\n reverse: bool = False,\n dtype: Optional[paddle.dtype] = None,\n out: Optional[paddle.Tensor] = None,\n) -> paddle.Tensor:\n dtype = dtype if dtype is not None else x.dtype\n if reverse:\n x = paddle.flip(x, axis=[axis])\n x_unstacked = paddle.unbind(x, axis=axis)\n cummin_x_unstacked = []\n cummin_x_unstacked.append(x_unstacked[0])\n for i, x_sub 
in enumerate(x_unstacked[1:]):\n cummin_x_sub = paddle.minimum(cummin_x_unstacked[i], x_sub)\n cummin_x_unstacked.append(cummin_x_sub)\n cummin_x = paddle.stack(cummin_x_unstacked, axis=axis)\n if reverse:\n cummin_x = paddle.flip(cummin_x, axis=[axis])\n return cummin_x.cast(dtype)\n",
"path": "ivy/functional/backends/paddle/experimental/statistical.py"
}
] | diff --git a/ivy/functional/backends/paddle/experimental/statistical.py b/ivy/functional/backends/paddle/experimental/statistical.py
index 6ecfaa16caf5d..84ed593da8fd9 100644
--- a/ivy/functional/backends/paddle/experimental/statistical.py
+++ b/ivy/functional/backends/paddle/experimental/statistical.py
@@ -419,7 +419,7 @@ def unravel_index(
/,
*,
out: Optional[paddle.Tensor] = None,
-) -> tuple[Any, ...]:
+) -> Tuple[Any, ...]:
if indices.ndim == 0:
indices = indices.unsqueeze(0)
coord = []
|
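For context on the one-line diff above: it replaces the built-in generic `tuple[Any, ...]` with `typing.Tuple[Any, ...]` in the return annotation of `unravel_index`. Annotations are evaluated when the `def` statement runs, and subscripting the built-in `tuple` only became legal in Python 3.9 (PEP 585), so the original annotation makes the whole module fail to import on older interpreters, while `typing.Tuple` is subscriptable everywhere. A minimal sketch (the helper name and the pure-Python body are illustrative, not the backend's actual implementation):

```
from typing import Any, Tuple

# On Python < 3.9, defining a function annotated with the built-in generic
#     def f() -> tuple[Any, ...]: ...
# raises "TypeError: 'type' object is not subscriptable" at import time,
# because the annotation is evaluated when the `def` statement executes.
# The typing-module alias below works on all supported versions.
def unravel_index_sketch(index: int, shape: Tuple[int, ...]) -> Tuple[Any, ...]:
    """Toy pure-Python analogue of unravel_index, for illustration only."""
    coord = []
    for dim in reversed(shape):
        coord.append(index % dim)  # coordinate along this axis
        index //= dim              # carry the quotient up one axis
    return tuple(reversed(coord))


# Row-major index 7 in a 2x4 array sits at row 1, column 3.
assert unravel_index_sketch(7, (2, 4)) == (1, 3)
```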
ibis-project__ibis-2055 | BUG: [omniscidb] add OSError as a possible exception when importing GPUDataFrame
When using ibis in an environment where cudf is installed but no working GPU is available, importing the client raises this error:
```
/opt/conda/lib/python3.7/site-packages/ibis/omniscidb/client.py in <module>
26
27 try:
---> 28 from cudf.dataframe.dataframe import DataFrame as GPUDataFrame
29 except ImportError:
30 GPUDataFrame = None
...
OSError: cannot load library '/opt/conda/lib/librmm.so': /usr/lib/x86_64-linux-gnu/libcuda.so.1: file too short
```
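The fix shown in the `after_files` below widens the import guard so that a broken CUDA userspace is treated the same as a missing cudf. A minimal sketch of the pattern:

```
# Loading cudf on a machine with a half-installed CUDA stack surfaces as
# OSError (librmm.so / libcuda.so failing to load) rather than ImportError,
# so both are caught and collapsed into "no GPU dataframe support".
try:
    from cudf.dataframe.dataframe import DataFrame as GPUDataFrame
except (ImportError, OSError):
    GPUDataFrame = None
```

Downstream code can then test `GPUDataFrame is None` instead of letting the load failure propagate at import time, mirroring the `FULL_GEO_SUPPORTED` sentinel used elsewhere in the same module.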
| [
{
"content": "\"\"\"Ibis OmniSciDB Client.\"\"\"\nimport pandas as pd\nimport pkg_resources\nimport pymapd\nimport regex as re\nfrom pymapd._parsers import _extract_column_details\nfrom pymapd.cursor import Cursor\nfrom pymapd.dtypes import TDatumType as pymapd_dtype\n\nimport ibis.common.exceptions as com\nimport ibis.expr.datatypes as dt\nimport ibis.expr.operations as ops\nimport ibis.expr.schema as sch\nimport ibis.expr.types as ir\nfrom ibis.client import Database, DatabaseEntity, Query, SQLClient\nfrom ibis.omniscidb import ddl\nfrom ibis.omniscidb.compiler import OmniSciDBDialect, build_ast\nfrom ibis.sql.compiler import DDL, DML\nfrom ibis.util import log\n\ntry:\n from cudf.dataframe.dataframe import DataFrame as GPUDataFrame\nexcept ImportError:\n GPUDataFrame = None\n\n# used to check if geopandas and shapely is available\nFULL_GEO_SUPPORTED = False\ntry:\n import geopandas\n import shapely.wkt\n\n FULL_GEO_SUPPORTED = True\nexcept ImportError:\n ...\n\nEXECUTION_TYPE_ICP = 1\nEXECUTION_TYPE_ICP_GPU = 2\nEXECUTION_TYPE_CURSOR = 3\n\nfully_qualified_re = re.compile(r\"(.*)\\.(?:`(.*)`|(.*))\")\n\n\ndef _validate_compatible(from_schema, to_schema):\n if set(from_schema.names) != set(to_schema.names):\n raise com.IbisInputError('Schemas have different names')\n\n for name in from_schema:\n lt = from_schema[name]\n rt = to_schema[name]\n if not lt.castable(rt):\n raise com.IbisInputError(\n 'Cannot safely cast {0!r} to {1!r}'.format(lt, rt)\n )\n return\n\n\nclass PyMapDVersionError(Exception):\n \"\"\"PyMapD version error exception.\"\"\"\n\n pass\n\n\nclass OmniSciDBDataType:\n \"\"\"OmniSciDB Backend Data Type.\"\"\"\n\n __slots__ = 'typename', 'nullable'\n\n # using impala.client._HS2_TTypeId_to_dtype as reference\n dtypes = {\n 'BIGINT': dt.int64,\n 'BOOL': dt.Boolean,\n 'DATE': dt.date,\n 'DECIMAL': dt.Decimal(18, 9),\n 'DOUBLE': dt.double,\n 'FLOAT': dt.float32,\n 'INT': dt.int32,\n 'LINESTRING': dt.linestring,\n 'MULTIPOLYGON': dt.multipolygon,\n 'NULL': dt.Null,\n 'NUMERIC': dt.Decimal(18, 9),\n 'POINT': dt.point,\n 'POLYGON': dt.polygon,\n 'SMALLINT': dt.int16,\n 'STR': dt.string,\n 'TIME': dt.time,\n 'TIMESTAMP': dt.timestamp,\n 'TINYINT': dt.int8,\n }\n\n ibis_dtypes = {v: k for k, v in dtypes.items()}\n\n _omniscidb_to_ibis_dtypes = {\n 'BIGINT': 'int64',\n 'BOOLEAN': 'Boolean',\n 'BOOL': 'Boolean',\n 'CHAR': 'string',\n 'DATE': 'date',\n 'DECIMAL': 'decimal',\n 'DOUBLE': 'double',\n 'INT': 'int32',\n 'INTEGER': 'int32',\n 'FLOAT': 'float32',\n 'NUMERIC': 'float64',\n 'REAL': 'float32',\n 'SMALLINT': 'int16',\n 'STR': 'string',\n 'TEXT': 'string',\n 'TIME': 'time',\n 'TIMESTAMP': 'timestamp',\n 'VARCHAR': 'string',\n 'POINT': 'point',\n 'LINESTRING': 'linestring',\n 'POLYGON': 'polygon',\n 'MULTIPOLYGON': 'multipolygon',\n }\n\n def __init__(self, typename, nullable=True):\n if typename not in self.dtypes:\n raise com.UnsupportedBackendType(typename)\n self.typename = typename\n self.nullable = nullable\n\n def __str__(self):\n \"\"\"Return the data type name.\"\"\"\n if self.nullable:\n return 'Nullable({})'.format(self.typename)\n else:\n return self.typename\n\n def __repr__(self):\n \"\"\"Return the backend name and the datatype name.\"\"\"\n return '<OmniSciDB {}>'.format(str(self))\n\n @classmethod\n def parse(cls, spec: str):\n \"\"\"Return a OmniSciDBDataType related to the given data type name.\n\n Parameters\n ----------\n spec : string\n\n Returns\n -------\n OmniSciDBDataType\n \"\"\"\n if spec.startswith('Nullable'):\n return cls(spec[9:-1], nullable=True)\n 
else:\n return cls(spec)\n\n def to_ibis(self):\n \"\"\"\n Return the Ibis data type correspondent to the current OmniSciDB type.\n\n Returns\n -------\n ibis.expr.datatypes.DataType\n \"\"\"\n return self.dtypes[self.typename](nullable=self.nullable)\n\n @classmethod\n def from_ibis(cls, dtype, nullable=None):\n \"\"\"\n Return a OmniSciDBDataType correspondent to the given Ibis data type.\n\n Parameters\n ----------\n dtype : ibis.expr.datatypes.DataType\n nullable : bool\n\n Returns\n -------\n OmniSciDBDataType\n\n Raises\n ------\n NotImplementedError\n if the given data type was not implemented.\n \"\"\"\n dtype_ = type(dtype)\n if dtype_ in cls.ibis_dtypes:\n typename = cls.ibis_dtypes[dtype_]\n elif dtype in cls.ibis_dtypes:\n typename = cls.ibis_dtypes[dtype]\n else:\n raise NotImplementedError('{} dtype not implemented'.format(dtype))\n\n if nullable is None:\n nullable = dtype.nullable\n return cls(typename, nullable=nullable)\n\n\nclass OmniSciDBDefaultCursor:\n \"\"\"Default cursor that exports a result to Pandas Data Frame.\"\"\"\n\n def __init__(self, cursor):\n self.cursor = cursor\n\n def to_df(self):\n \"\"\"Convert the cursor to a data frame.\n\n Returns\n -------\n dataframe : pandas.DataFrame\n \"\"\"\n if isinstance(self.cursor, Cursor):\n col_names = [c.name for c in self.cursor.description]\n result = pd.DataFrame(self.cursor.fetchall(), columns=col_names)\n elif self.cursor is None:\n result = pd.DataFrame([])\n else:\n result = self.cursor\n\n return result\n\n def __enter__(self):\n \"\"\"For compatibility when constructed from Query.execute().\"\"\"\n return self\n\n def __exit__(self, exc_type, exc_value, traceback):\n \"\"\"Exit when using `with` statement.\"\"\"\n pass\n\n\nclass OmniSciDBGeoCursor(OmniSciDBDefaultCursor):\n \"\"\"Cursor that exports result to GeoPandas Data Frame.\"\"\"\n\n def to_df(self):\n \"\"\"Convert the cursor to a data frame.\n\n Returns\n -------\n dataframe : pandas.DataFrame\n \"\"\"\n cursor = self.cursor\n cursor_description = cursor.description\n\n if not isinstance(cursor, Cursor):\n if cursor is None:\n return geopandas.GeoDataFrame([])\n return cursor\n\n col_names = [c.name for c in cursor_description]\n result = pd.DataFrame(cursor.fetchall(), columns=col_names)\n\n # get geo types from pymapd\n geotypes = (\n pymapd_dtype.POINT,\n pymapd_dtype.LINESTRING,\n pymapd_dtype.POLYGON,\n pymapd_dtype.MULTIPOLYGON,\n pymapd_dtype.GEOMETRY,\n pymapd_dtype.GEOGRAPHY,\n )\n\n geo_column = None\n\n for d in cursor_description:\n field_name = d.name\n if d.type_code in geotypes:\n # use the first geo column found as default geometry\n # geopandas doesn't allow multiple GeoSeries\n # to specify other column as a geometry on a GeoDataFrame\n # use something like: df.set_geometry('buffers').plot()\n geo_column = geo_column or field_name\n result[field_name] = result[field_name].apply(\n shapely.wkt.loads\n )\n if geo_column:\n result = geopandas.GeoDataFrame(result, geometry=geo_column)\n return result\n\n\nclass OmniSciDBQuery(Query):\n \"\"\"OmniSciDB Query class.\"\"\"\n\n def _fetch(self, cursor):\n # check if cursor is a pymapd cursor.Cursor\n return self.schema().apply_to(cursor.to_df())\n\n\nclass OmniSciDBTable(ir.TableExpr, DatabaseEntity):\n \"\"\"References a physical table in the OmniSciDB metastore.\"\"\"\n\n @property\n def _qualified_name(self):\n return self.op().args[0]\n\n @property\n def _unqualified_name(self):\n return self._match_name()[1]\n\n @property\n def _client(self):\n return self.op().args[2]\n\n def 
_match_name(self):\n m = ddl.fully_qualified_re.match(self._qualified_name)\n if not m:\n raise com.IbisError(\n 'Cannot determine database name from {0}'.format(\n self._qualified_name\n )\n )\n db, quoted, unquoted = m.groups()\n return db, quoted or unquoted\n\n @property\n def _database(self):\n return self._match_name()[0]\n\n @com.mark_as_unsupported\n def invalidate_metadata(self):\n \"\"\"Invalidate table metadata.\n\n Raises\n ------\n common.exceptions.UnsupportedOperationError\n \"\"\"\n\n @com.mark_as_unsupported\n def refresh(self):\n \"\"\"Refresh table metadata.\n\n Raises\n ------\n common.exceptions.UnsupportedOperationError\n \"\"\"\n\n def metadata(self):\n \"\"\"\n Return parsed results of DESCRIBE FORMATTED statement.\n\n Returns\n -------\n metadata : pandas.DataFrame\n \"\"\"\n return pd.DataFrame(\n [\n (\n col.name,\n OmniSciDBDataType.parse(col.type),\n col.precision,\n col.scale,\n col.comp_param,\n col.encoding,\n )\n for col in self._client.con.get_table_details(\n self._qualified_name\n )\n ],\n columns=[\n 'column_name',\n 'type',\n 'precision',\n 'scale',\n 'comp_param',\n 'encoding',\n ],\n )\n\n describe_formatted = metadata\n\n def drop(self):\n \"\"\"Drop the table from the database.\"\"\"\n self._client.drop_table_or_view(self._qualified_name)\n\n def truncate(self):\n \"\"\"Delete all rows from, but do not drop, an existing table.\"\"\"\n self._client.truncate_table(self._qualified_name)\n\n def load_data(self, df):\n \"\"\"\n Load a data frame into database.\n\n Wraps the LOAD DATA DDL statement. Loads data into an OmniSciDB table\n from pandas.DataFrame or pyarrow.Table\n\n Parameters\n ----------\n df: pandas.DataFrame or pyarrow.Table\n\n Returns\n -------\n query : OmniSciDBQuery\n \"\"\"\n stmt = ddl.LoadData(self._qualified_name, df)\n return self._execute(stmt)\n\n @property\n def name(self) -> str:\n \"\"\"Return the operation name.\n\n Returns\n -------\n str\n \"\"\"\n return self.op().name\n\n def rename(self, new_name, database=None):\n \"\"\"\n Rename table to a given name.\n\n Parameters\n ----------\n new_name : string\n database : string\n\n Returns\n -------\n renamed : OmniSciDBTable\n \"\"\"\n m = ddl.fully_qualified_re.match(new_name)\n if not m and database is None:\n database = self._database\n\n statement = ddl.RenameTable(\n self._qualified_name, new_name, new_database=database\n )\n\n self._client._execute(statement)\n\n op = self.op().change_name(statement.new_qualified_name)\n return type(self)(op)\n\n def _execute(self, stmt):\n return self._client._execute(stmt)\n\n def alter(self, tbl_properties=None):\n \"\"\"\n Change setting and parameters of the table.\n\n Parameters\n ----------\n tbl_properties : dict, optional\n\n Returns\n -------\n None (for now)\n \"\"\"\n # internal function that runs DDL operation\n def _run_ddl(**kwds):\n stmt = ddl.AlterTable(self._qualified_name, **kwds)\n return self._execute(stmt)\n\n return self._alter_table_helper(\n _run_ddl, tbl_properties=tbl_properties\n )\n\n def _alter_table_helper(self, f, **alterations):\n results = []\n for k, v in alterations.items():\n if v is None:\n continue\n result = f(**{k: v})\n results.append(result)\n return results\n\n\nclass OmniSciDBClient(SQLClient):\n \"\"\"Client class for OmniSciDB backend.\"\"\"\n\n database_class = Database\n query_class = OmniSciDBQuery\n dialect = OmniSciDBDialect\n table_expr_class = OmniSciDBTable\n\n def __init__(\n self,\n uri: str = None,\n user: str = None,\n password: str = None,\n host: str = None,\n port: str = 
6274,\n database: str = None,\n protocol: str = 'binary',\n session_id: str = None,\n execution_type: str = EXECUTION_TYPE_CURSOR,\n ):\n \"\"\"Initialize OmniSciDB Client.\n\n Parameters\n ----------\n uri : str, optional\n user : str, optional\n password : str, optional\n host : str, optional\n port : int, default 6274\n database : str, optional\n protocol : {'binary', 'http', 'https'}, default binary\n session_id: str, optional\n execution_type : {\n EXECUTION_TYPE_ICP, EXECUTION_TYPE_ICP_GPU, EXECUTION_TYPE_CURSOR\n }, default EXECUTION_TYPE_CURSOR\n\n Raises\n ------\n Exception\n if the given execution_type is not valid.\n PyMapDVersionError\n if session_id is given but pymapd version is less or equal to 0.12\n \"\"\"\n self.uri = uri\n self.user = user\n self.password = password\n self.host = host\n self.port = port\n self.db_name = database\n self.protocol = protocol\n self.session_id = session_id\n\n if execution_type not in (\n EXECUTION_TYPE_ICP,\n EXECUTION_TYPE_ICP_GPU,\n EXECUTION_TYPE_CURSOR,\n ):\n raise Exception('Execution type defined not available.')\n\n self.execution_type = execution_type\n\n if session_id:\n if self.version < pkg_resources.parse_version('0.12.0'):\n raise PyMapDVersionError(\n 'Must have pymapd > 0.12 to use session ID'\n )\n self.con = pymapd.connect(\n uri=uri,\n host=host,\n port=port,\n protocol=protocol,\n sessionid=session_id,\n )\n else:\n self.con = pymapd.connect(\n uri=uri,\n user=user,\n password=password,\n host=host,\n port=port,\n dbname=database,\n protocol=protocol,\n )\n\n def __del__(self):\n \"\"\"Close the connection when instance is deleted.\"\"\"\n self.close()\n\n def __enter__(self, **kwargs):\n \"\"\"Update internal attributes when using `with` statement.\"\"\"\n self.__dict__.update(**kwargs)\n return self\n\n def __exit__(self, *args):\n \"\"\"Close the connection when exits the `with` statement.\"\"\"\n self.close()\n\n def log(self, msg: str):\n \"\"\"Print or log a message.\n\n Parameters\n ----------\n msg : string\n \"\"\"\n log(msg)\n\n def close(self):\n \"\"\"Close OmniSciDB connection and drop any temporary objects.\"\"\"\n self.con.close()\n\n def _adapt_types(self, descr):\n names = []\n adapted_types = []\n for col in descr:\n names.append(col.name)\n adapted_types.append(\n OmniSciDBDataType._omniscidb_to_ibis_dtypes[col.type]\n )\n return names, adapted_types\n\n def _build_ast(self, expr, context):\n result = build_ast(expr, context)\n return result\n\n def _fully_qualified_name(self, name, database):\n # OmniSciDB raises error sometimes with qualified names\n return name\n\n def _get_list(self, cur):\n tuples = cur.cursor.fetchall()\n return [v[0] for v in tuples]\n\n def _get_schema_using_query(self, query):\n with self._execute(query, results=True) as result:\n # resets the state of the cursor and closes operation\n result.cursor.fetchall()\n names, ibis_types = self._adapt_types(\n _extract_column_details(result.cursor._result.row_set.row_desc)\n )\n\n return sch.Schema(names, ibis_types)\n\n def _get_schema_using_validator(self, query):\n result = self.con._client.sql_validate(self.con._session, query)\n return sch.Schema.from_tuples(\n (\n r,\n OmniSciDBDataType._omniscidb_to_ibis_dtypes[\n pymapd_dtype._VALUES_TO_NAMES[result[r].col_type.type]\n ],\n )\n for r in result\n )\n\n def _get_table_schema(self, table_name, database=None):\n \"\"\"Get table schema.\n\n Parameters\n ----------\n table_name : str\n database : str\n\n Returns\n -------\n schema : ibis Schema\n \"\"\"\n table_name_ = 
table_name.split('.')\n if len(table_name_) == 2:\n database, table_name = table_name_\n return self.get_schema(table_name, database)\n\n def _execute(self, query, results=True):\n \"\"\"Execute a query.\n\n Paramters\n ---------\n query : DDL or DML or string\n\n Returns\n -------\n result : pandas.DataFrame\n\n Raises\n ------\n Exception\n if execution method fails.\n \"\"\"\n if isinstance(query, (DDL, DML)):\n query = query.compile()\n\n if self.execution_type == EXECUTION_TYPE_ICP:\n execute = self.con.select_ipc\n elif self.execution_type == EXECUTION_TYPE_ICP_GPU:\n execute = self.con.select_ipc_gpu\n else:\n execute = self.con.cursor().execute\n\n cursor = (\n OmniSciDBGeoCursor\n if FULL_GEO_SUPPORTED\n else OmniSciDBDefaultCursor\n )\n\n try:\n result = cursor(execute(query))\n except Exception as e:\n raise Exception('{}: {}'.format(e, query))\n\n if results:\n return result\n\n def create_database(self, name, owner=None):\n \"\"\"\n Create a new OmniSciDB database.\n\n Parameters\n ----------\n name : string\n Database name\n \"\"\"\n statement = ddl.CreateDatabase(name, owner=owner)\n self._execute(statement)\n\n def describe_formatted(self, name: str) -> pd.DataFrame:\n \"\"\"Describe a given table name.\n\n Parameters\n ----------\n name : string\n\n Returns\n -------\n pandas.DataFrame\n \"\"\"\n return pd.DataFrame(\n [\n (\n col.name,\n OmniSciDBDataType.parse(col.type),\n col.precision,\n col.scale,\n col.comp_param,\n col.encoding,\n )\n for col in self.con.get_table_details(name)\n ],\n columns=[\n 'column_name',\n 'type',\n 'precision',\n 'scale',\n 'comp_param',\n 'encoding',\n ],\n )\n\n def drop_database(self, name, force=False):\n \"\"\"\n Drop an OmniSciDB database.\n\n Parameters\n ----------\n name : string\n Database name\n force : boolean, default False\n If False and there are any tables in this database, raises an\n IntegrityError\n\n Raises\n ------\n ibis.common.exceptions.IntegrityError\n if given database has tables and force is not define as True\n \"\"\"\n tables = []\n\n if not force or self.database(name):\n tables = self.list_tables(database=name)\n\n if not force and len(tables):\n raise com.IntegrityError(\n 'Database {0} must be empty before being dropped, or set '\n 'force=True'.format(name)\n )\n statement = ddl.DropDatabase(name)\n self._execute(statement)\n\n def create_user(self, name, password, is_super=False):\n \"\"\"\n Create a new OmniSciDB user.\n\n Parameters\n ----------\n name : string\n User name\n password : string\n Password\n is_super : bool\n if user is a superuser\n \"\"\"\n statement = ddl.CreateUser(\n name=name, password=password, is_super=is_super\n )\n self._execute(statement)\n\n def alter_user(\n self, name, password=None, is_super=None, insert_access=None\n ):\n \"\"\"\n Alter OmniSciDB user parameters.\n\n Parameters\n ----------\n name : string\n User name\n password : string\n Password\n is_super : bool\n If user is a superuser\n insert_access : string\n If users need to insert records to a database they do not own,\n use insert_access property to give them the required privileges.\n \"\"\"\n statement = ddl.AlterUser(\n name=name,\n password=password,\n is_super=is_super,\n insert_access=insert_access,\n )\n self._execute(statement)\n\n def drop_user(self, name):\n \"\"\"\n Drop a given user.\n\n Parameters\n ----------\n name : string\n User name\n \"\"\"\n statement = ddl.DropUser(name)\n self._execute(statement)\n\n def create_view(self, name, expr, database=None):\n \"\"\"\n Create a view with a given name 
from a table expression.\n\n Parameters\n ----------\n name : string\n expr : ibis TableExpr\n database : string, optional\n \"\"\"\n ast = self._build_ast(expr, OmniSciDBDialect.make_context())\n select = ast.queries[0]\n statement = ddl.CreateView(name, select, database=database)\n self._execute(statement)\n\n def drop_view(self, name, database=None):\n \"\"\"\n Drop a given view.\n\n Parameters\n ----------\n name : string\n database : string, default None\n \"\"\"\n statement = ddl.DropView(name, database=database)\n self._execute(statement, False)\n\n def create_table(\n self, table_name, obj=None, schema=None, database=None, max_rows=None\n ):\n \"\"\"\n Create a new table from an Ibis table expression.\n\n Parameters\n ----------\n table_name : string\n obj : TableExpr or pandas.DataFrame, optional\n If passed, creates table from select statement results\n schema : ibis.Schema, optional\n Mutually exclusive with expr, creates an empty table with a\n particular schema\n database : string, optional\n max_rows : int, optional\n Set the maximum number of rows allowed in a table to create a capped\n collection. When this limit is reached, the oldest fragment is\n removed. Default = 2^62.\n\n Examples\n --------\n >>> con.create_table('new_table_name', table_expr) # doctest: +SKIP\n \"\"\"\n _database = self.db_name\n self.set_database(database)\n\n if obj is not None:\n if isinstance(obj, pd.DataFrame):\n raise NotImplementedError(\n 'Pandas Data Frame input not implemented.'\n )\n else:\n to_insert = obj\n ast = self._build_ast(to_insert, OmniSciDBDialect.make_context())\n select = ast.queries[0]\n\n statement = ddl.CTAS(table_name, select, database=database)\n elif schema is not None:\n statement = ddl.CreateTableWithSchema(\n table_name, schema, database=database, max_rows=max_rows\n )\n else:\n raise com.IbisError('Must pass expr or schema')\n\n self._execute(statement, False)\n self.set_database(_database)\n\n def drop_table(self, table_name, database=None, force=False):\n \"\"\"\n Drop a given table.\n\n Parameters\n ----------\n table_name : string\n database : string, default None (optional)\n force : boolean, default False\n Database may throw exception if table does not exist\n\n Examples\n --------\n >>> table = 'my_table'\n >>> db = 'operations'\n >>> con.drop_table(table, database=db, force=True) # doctest: +SKIP\n \"\"\"\n _database = self.db_name\n self.set_database(database)\n\n statement = ddl.DropTable(\n table_name, database=database, must_exist=not force\n )\n self._execute(statement, False)\n self.set_database(_database)\n\n def truncate_table(self, table_name, database=None):\n \"\"\"\n Delete all rows from, but do not drop, an existing table.\n\n Parameters\n ----------\n table_name : string\n database : string, optional\n \"\"\"\n statement = ddl.TruncateTable(table_name, database=database)\n self._execute(statement, False)\n\n def drop_table_or_view(\n self, name: str, database: str = None, force: bool = False\n ):\n \"\"\"Attempt to drop a relation that may be a view or table.\n\n Parameters\n ----------\n name : str\n database : str, optional\n force : bool, optional\n\n Raises\n ------\n Exception\n if the drop operation fails.\n \"\"\"\n try:\n self.drop_table(name, database=database)\n except Exception as e:\n try:\n self.drop_view(name, database=database)\n except Exception:\n raise e\n\n def database(self, name=None):\n \"\"\"Connect to a given database.\n\n Parameters\n ----------\n name : str, optional\n The name of the database to connect to. 
If ``None``, return\n the database named ``self.current_database``.\n\n Returns\n -------\n db : Database\n An :class:`ibis.client.Database` instance.\n\n Notes\n -----\n This creates a new connection if `name` is both not ``None`` and not\n equal to the current database.\n \"\"\"\n if name == self.current_database or name is None:\n return self.database_class(self.current_database, self)\n else:\n client_class = type(self)\n new_client = client_class(\n uri=self.uri,\n user=self.user,\n password=self.password,\n host=self.host,\n port=self.port,\n database=name,\n protocol=self.protocol,\n session_id=self.session_id,\n execution_type=self.execution_type,\n )\n return self.database_class(name, new_client)\n\n def load_data(self, table_name, obj, database=None, **kwargs):\n \"\"\"Load data into a given table.\n\n Wraps the LOAD DATA DDL statement. Loads data into an OmniSciDB table\n by physically moving data files.\n\n Parameters\n ----------\n table_name : string\n obj: pandas.DataFrame or pyarrow.Table\n database : string, optional\n \"\"\"\n _database = self.db_name\n self.set_database(database)\n self.con.load_table(table_name, obj, **kwargs)\n self.set_database(_database)\n\n @property\n def current_database(self):\n \"\"\"Get the current database name.\"\"\"\n return self.db_name\n\n def set_database(self, name: str):\n \"\"\"Set a given database for the current connect.\n\n Parameters\n ----------\n name : string\n \"\"\"\n if self.db_name != name and name is not None:\n self.con.close()\n self.con = pymapd.connect(\n uri=self.uri,\n user=self.user,\n password=self.password,\n host=self.host,\n port=self.port,\n dbname=name,\n protocol=self.protocol,\n sessionid=self.session_id,\n )\n self.db_name = name\n\n @com.mark_as_unsupported\n def exists_database(self, name: str):\n \"\"\"Check if the given database exists.\n\n Parameters\n ----------\n name : str\n\n Raises\n ------\n NotImplementedError\n Method not supported yet.\n \"\"\"\n\n @com.mark_as_unsupported\n def list_databases(self, like: str = None):\n \"\"\"List all databases.\n\n Parameters\n ----------\n like : str, optional\n\n Raises\n ------\n NotImplementedError\n Method not supported yet.\n \"\"\"\n\n def exists_table(self, name: str, database: str = None):\n \"\"\"\n Determine if the indicated table or view exists.\n\n Parameters\n ----------\n name : string\n database : string, default None\n\n Returns\n -------\n if_exists : boolean\n \"\"\"\n return bool(self.list_tables(like=name, database=database))\n\n def list_tables(self, like: str = None, database: str = None) -> list:\n \"\"\"List all tables inside given or current database.\n\n Parameters\n ----------\n like : str, optional\n database : str, optional\n\n Returns\n -------\n list\n \"\"\"\n _database = None\n\n if not self.db_name == database:\n _database = self.db_name\n self.set_database(database)\n\n tables = self.con.get_tables()\n\n if _database:\n self.set_database(_database)\n\n if like is None:\n return tables\n pattern = re.compile(like)\n return list(filter(lambda t: pattern.findall(t), tables))\n\n def get_schema(self, table_name, database=None):\n \"\"\"\n Return a Schema object for the given table and database.\n\n Parameters\n ----------\n table_name : string\n May be fully qualified\n database : string, default None\n\n Returns\n -------\n schema : ibis Schema\n \"\"\"\n col_names = []\n col_types = []\n\n for col in self.con.get_table_details(table_name):\n col_names.append(col.name)\n col_types.append(OmniSciDBDataType.parse(col.type))\n\n 
return sch.schema(\n [\n (col.name, OmniSciDBDataType.parse(col.type))\n for col in self.con.get_table_details(table_name)\n ]\n )\n\n def sql(self, query: str):\n \"\"\"\n Convert a SQL query to an Ibis table expression.\n\n Parameters\n ----------\n query : string\n\n Returns\n -------\n table : TableExpr\n \"\"\"\n # Remove `;` + `--` (comment)\n query = re.sub(r'\\s*;\\s*--', '\\n--', query.strip())\n # Remove trailing ;\n query = re.sub(r'\\s*;\\s*$', '', query.strip())\n schema = self._get_schema_using_validator(query)\n return ops.SQLQueryResult(query, schema, self).to_expr()\n\n @property\n def version(self):\n \"\"\"Return the backend library version.\n\n Returns\n -------\n string\n Version of the backend library.\n \"\"\"\n # pymapd doesn't have __version__\n dist = pkg_resources.get_distribution('pymapd')\n return pkg_resources.parse_version(dist.version)\n\n\[email protected](OmniSciDBDataType)\ndef omniscidb_to_ibis_dtype(omniscidb_dtype):\n \"\"\"\n Register OmniSciDB Data Types.\n\n Parameters\n ----------\n omniscidb_dtype : OmniSciDBDataType\n\n Returns\n -------\n ibis.expr.datatypes.DataType\n \"\"\"\n return omniscidb_dtype.to_ibis()\n",
"path": "ibis/omniscidb/client.py"
}
] | [
{
"content": "\"\"\"Ibis OmniSciDB Client.\"\"\"\nimport pandas as pd\nimport pkg_resources\nimport pymapd\nimport regex as re\nfrom pymapd._parsers import _extract_column_details\nfrom pymapd.cursor import Cursor\nfrom pymapd.dtypes import TDatumType as pymapd_dtype\n\nimport ibis.common.exceptions as com\nimport ibis.expr.datatypes as dt\nimport ibis.expr.operations as ops\nimport ibis.expr.schema as sch\nimport ibis.expr.types as ir\nfrom ibis.client import Database, DatabaseEntity, Query, SQLClient\nfrom ibis.omniscidb import ddl\nfrom ibis.omniscidb.compiler import OmniSciDBDialect, build_ast\nfrom ibis.sql.compiler import DDL, DML\nfrom ibis.util import log\n\ntry:\n from cudf.dataframe.dataframe import DataFrame as GPUDataFrame\nexcept (ImportError, OSError):\n GPUDataFrame = None\n\n# used to check if geopandas and shapely is available\nFULL_GEO_SUPPORTED = False\ntry:\n import geopandas\n import shapely.wkt\n\n FULL_GEO_SUPPORTED = True\nexcept ImportError:\n ...\n\nEXECUTION_TYPE_ICP = 1\nEXECUTION_TYPE_ICP_GPU = 2\nEXECUTION_TYPE_CURSOR = 3\n\nfully_qualified_re = re.compile(r\"(.*)\\.(?:`(.*)`|(.*))\")\n\n\ndef _validate_compatible(from_schema, to_schema):\n if set(from_schema.names) != set(to_schema.names):\n raise com.IbisInputError('Schemas have different names')\n\n for name in from_schema:\n lt = from_schema[name]\n rt = to_schema[name]\n if not lt.castable(rt):\n raise com.IbisInputError(\n 'Cannot safely cast {0!r} to {1!r}'.format(lt, rt)\n )\n return\n\n\nclass PyMapDVersionError(Exception):\n \"\"\"PyMapD version error exception.\"\"\"\n\n pass\n\n\nclass OmniSciDBDataType:\n \"\"\"OmniSciDB Backend Data Type.\"\"\"\n\n __slots__ = 'typename', 'nullable'\n\n # using impala.client._HS2_TTypeId_to_dtype as reference\n dtypes = {\n 'BIGINT': dt.int64,\n 'BOOL': dt.Boolean,\n 'DATE': dt.date,\n 'DECIMAL': dt.Decimal(18, 9),\n 'DOUBLE': dt.double,\n 'FLOAT': dt.float32,\n 'INT': dt.int32,\n 'LINESTRING': dt.linestring,\n 'MULTIPOLYGON': dt.multipolygon,\n 'NULL': dt.Null,\n 'NUMERIC': dt.Decimal(18, 9),\n 'POINT': dt.point,\n 'POLYGON': dt.polygon,\n 'SMALLINT': dt.int16,\n 'STR': dt.string,\n 'TIME': dt.time,\n 'TIMESTAMP': dt.timestamp,\n 'TINYINT': dt.int8,\n }\n\n ibis_dtypes = {v: k for k, v in dtypes.items()}\n\n _omniscidb_to_ibis_dtypes = {\n 'BIGINT': 'int64',\n 'BOOLEAN': 'Boolean',\n 'BOOL': 'Boolean',\n 'CHAR': 'string',\n 'DATE': 'date',\n 'DECIMAL': 'decimal',\n 'DOUBLE': 'double',\n 'INT': 'int32',\n 'INTEGER': 'int32',\n 'FLOAT': 'float32',\n 'NUMERIC': 'float64',\n 'REAL': 'float32',\n 'SMALLINT': 'int16',\n 'STR': 'string',\n 'TEXT': 'string',\n 'TIME': 'time',\n 'TIMESTAMP': 'timestamp',\n 'VARCHAR': 'string',\n 'POINT': 'point',\n 'LINESTRING': 'linestring',\n 'POLYGON': 'polygon',\n 'MULTIPOLYGON': 'multipolygon',\n }\n\n def __init__(self, typename, nullable=True):\n if typename not in self.dtypes:\n raise com.UnsupportedBackendType(typename)\n self.typename = typename\n self.nullable = nullable\n\n def __str__(self):\n \"\"\"Return the data type name.\"\"\"\n if self.nullable:\n return 'Nullable({})'.format(self.typename)\n else:\n return self.typename\n\n def __repr__(self):\n \"\"\"Return the backend name and the datatype name.\"\"\"\n return '<OmniSciDB {}>'.format(str(self))\n\n @classmethod\n def parse(cls, spec: str):\n \"\"\"Return a OmniSciDBDataType related to the given data type name.\n\n Parameters\n ----------\n spec : string\n\n Returns\n -------\n OmniSciDBDataType\n \"\"\"\n if spec.startswith('Nullable'):\n return cls(spec[9:-1], 
nullable=True)\n else:\n return cls(spec)\n\n def to_ibis(self):\n \"\"\"\n Return the Ibis data type correspondent to the current OmniSciDB type.\n\n Returns\n -------\n ibis.expr.datatypes.DataType\n \"\"\"\n return self.dtypes[self.typename](nullable=self.nullable)\n\n @classmethod\n def from_ibis(cls, dtype, nullable=None):\n \"\"\"\n Return a OmniSciDBDataType correspondent to the given Ibis data type.\n\n Parameters\n ----------\n dtype : ibis.expr.datatypes.DataType\n nullable : bool\n\n Returns\n -------\n OmniSciDBDataType\n\n Raises\n ------\n NotImplementedError\n if the given data type was not implemented.\n \"\"\"\n dtype_ = type(dtype)\n if dtype_ in cls.ibis_dtypes:\n typename = cls.ibis_dtypes[dtype_]\n elif dtype in cls.ibis_dtypes:\n typename = cls.ibis_dtypes[dtype]\n else:\n raise NotImplementedError('{} dtype not implemented'.format(dtype))\n\n if nullable is None:\n nullable = dtype.nullable\n return cls(typename, nullable=nullable)\n\n\nclass OmniSciDBDefaultCursor:\n \"\"\"Default cursor that exports a result to Pandas Data Frame.\"\"\"\n\n def __init__(self, cursor):\n self.cursor = cursor\n\n def to_df(self):\n \"\"\"Convert the cursor to a data frame.\n\n Returns\n -------\n dataframe : pandas.DataFrame\n \"\"\"\n if isinstance(self.cursor, Cursor):\n col_names = [c.name for c in self.cursor.description]\n result = pd.DataFrame(self.cursor.fetchall(), columns=col_names)\n elif self.cursor is None:\n result = pd.DataFrame([])\n else:\n result = self.cursor\n\n return result\n\n def __enter__(self):\n \"\"\"For compatibility when constructed from Query.execute().\"\"\"\n return self\n\n def __exit__(self, exc_type, exc_value, traceback):\n \"\"\"Exit when using `with` statement.\"\"\"\n pass\n\n\nclass OmniSciDBGeoCursor(OmniSciDBDefaultCursor):\n \"\"\"Cursor that exports result to GeoPandas Data Frame.\"\"\"\n\n def to_df(self):\n \"\"\"Convert the cursor to a data frame.\n\n Returns\n -------\n dataframe : pandas.DataFrame\n \"\"\"\n cursor = self.cursor\n cursor_description = cursor.description\n\n if not isinstance(cursor, Cursor):\n if cursor is None:\n return geopandas.GeoDataFrame([])\n return cursor\n\n col_names = [c.name for c in cursor_description]\n result = pd.DataFrame(cursor.fetchall(), columns=col_names)\n\n # get geo types from pymapd\n geotypes = (\n pymapd_dtype.POINT,\n pymapd_dtype.LINESTRING,\n pymapd_dtype.POLYGON,\n pymapd_dtype.MULTIPOLYGON,\n pymapd_dtype.GEOMETRY,\n pymapd_dtype.GEOGRAPHY,\n )\n\n geo_column = None\n\n for d in cursor_description:\n field_name = d.name\n if d.type_code in geotypes:\n # use the first geo column found as default geometry\n # geopandas doesn't allow multiple GeoSeries\n # to specify other column as a geometry on a GeoDataFrame\n # use something like: df.set_geometry('buffers').plot()\n geo_column = geo_column or field_name\n result[field_name] = result[field_name].apply(\n shapely.wkt.loads\n )\n if geo_column:\n result = geopandas.GeoDataFrame(result, geometry=geo_column)\n return result\n\n\nclass OmniSciDBQuery(Query):\n \"\"\"OmniSciDB Query class.\"\"\"\n\n def _fetch(self, cursor):\n # check if cursor is a pymapd cursor.Cursor\n return self.schema().apply_to(cursor.to_df())\n\n\nclass OmniSciDBTable(ir.TableExpr, DatabaseEntity):\n \"\"\"References a physical table in the OmniSciDB metastore.\"\"\"\n\n @property\n def _qualified_name(self):\n return self.op().args[0]\n\n @property\n def _unqualified_name(self):\n return self._match_name()[1]\n\n @property\n def _client(self):\n return 
self.op().args[2]\n\n def _match_name(self):\n m = ddl.fully_qualified_re.match(self._qualified_name)\n if not m:\n raise com.IbisError(\n 'Cannot determine database name from {0}'.format(\n self._qualified_name\n )\n )\n db, quoted, unquoted = m.groups()\n return db, quoted or unquoted\n\n @property\n def _database(self):\n return self._match_name()[0]\n\n @com.mark_as_unsupported\n def invalidate_metadata(self):\n \"\"\"Invalidate table metadata.\n\n Raises\n ------\n common.exceptions.UnsupportedOperationError\n \"\"\"\n\n @com.mark_as_unsupported\n def refresh(self):\n \"\"\"Refresh table metadata.\n\n Raises\n ------\n common.exceptions.UnsupportedOperationError\n \"\"\"\n\n def metadata(self):\n \"\"\"\n Return parsed results of DESCRIBE FORMATTED statement.\n\n Returns\n -------\n metadata : pandas.DataFrame\n \"\"\"\n return pd.DataFrame(\n [\n (\n col.name,\n OmniSciDBDataType.parse(col.type),\n col.precision,\n col.scale,\n col.comp_param,\n col.encoding,\n )\n for col in self._client.con.get_table_details(\n self._qualified_name\n )\n ],\n columns=[\n 'column_name',\n 'type',\n 'precision',\n 'scale',\n 'comp_param',\n 'encoding',\n ],\n )\n\n describe_formatted = metadata\n\n def drop(self):\n \"\"\"Drop the table from the database.\"\"\"\n self._client.drop_table_or_view(self._qualified_name)\n\n def truncate(self):\n \"\"\"Delete all rows from, but do not drop, an existing table.\"\"\"\n self._client.truncate_table(self._qualified_name)\n\n def load_data(self, df):\n \"\"\"\n Load a data frame into database.\n\n Wraps the LOAD DATA DDL statement. Loads data into an OmniSciDB table\n from pandas.DataFrame or pyarrow.Table\n\n Parameters\n ----------\n df: pandas.DataFrame or pyarrow.Table\n\n Returns\n -------\n query : OmniSciDBQuery\n \"\"\"\n stmt = ddl.LoadData(self._qualified_name, df)\n return self._execute(stmt)\n\n @property\n def name(self) -> str:\n \"\"\"Return the operation name.\n\n Returns\n -------\n str\n \"\"\"\n return self.op().name\n\n def rename(self, new_name, database=None):\n \"\"\"\n Rename table to a given name.\n\n Parameters\n ----------\n new_name : string\n database : string\n\n Returns\n -------\n renamed : OmniSciDBTable\n \"\"\"\n m = ddl.fully_qualified_re.match(new_name)\n if not m and database is None:\n database = self._database\n\n statement = ddl.RenameTable(\n self._qualified_name, new_name, new_database=database\n )\n\n self._client._execute(statement)\n\n op = self.op().change_name(statement.new_qualified_name)\n return type(self)(op)\n\n def _execute(self, stmt):\n return self._client._execute(stmt)\n\n def alter(self, tbl_properties=None):\n \"\"\"\n Change setting and parameters of the table.\n\n Parameters\n ----------\n tbl_properties : dict, optional\n\n Returns\n -------\n None (for now)\n \"\"\"\n # internal function that runs DDL operation\n def _run_ddl(**kwds):\n stmt = ddl.AlterTable(self._qualified_name, **kwds)\n return self._execute(stmt)\n\n return self._alter_table_helper(\n _run_ddl, tbl_properties=tbl_properties\n )\n\n def _alter_table_helper(self, f, **alterations):\n results = []\n for k, v in alterations.items():\n if v is None:\n continue\n result = f(**{k: v})\n results.append(result)\n return results\n\n\nclass OmniSciDBClient(SQLClient):\n \"\"\"Client class for OmniSciDB backend.\"\"\"\n\n database_class = Database\n query_class = OmniSciDBQuery\n dialect = OmniSciDBDialect\n table_expr_class = OmniSciDBTable\n\n def __init__(\n self,\n uri: str = None,\n user: str = None,\n password: str = None,\n host: str 
= None,\n port: str = 6274,\n database: str = None,\n protocol: str = 'binary',\n session_id: str = None,\n execution_type: str = EXECUTION_TYPE_CURSOR,\n ):\n \"\"\"Initialize OmniSciDB Client.\n\n Parameters\n ----------\n uri : str, optional\n user : str, optional\n password : str, optional\n host : str, optional\n port : int, default 6274\n database : str, optional\n protocol : {'binary', 'http', 'https'}, default binary\n session_id: str, optional\n execution_type : {\n EXECUTION_TYPE_ICP, EXECUTION_TYPE_ICP_GPU, EXECUTION_TYPE_CURSOR\n }, default EXECUTION_TYPE_CURSOR\n\n Raises\n ------\n Exception\n if the given execution_type is not valid.\n PyMapDVersionError\n if session_id is given but pymapd version is less or equal to 0.12\n \"\"\"\n self.uri = uri\n self.user = user\n self.password = password\n self.host = host\n self.port = port\n self.db_name = database\n self.protocol = protocol\n self.session_id = session_id\n\n if execution_type not in (\n EXECUTION_TYPE_ICP,\n EXECUTION_TYPE_ICP_GPU,\n EXECUTION_TYPE_CURSOR,\n ):\n raise Exception('Execution type defined not available.')\n\n self.execution_type = execution_type\n\n if session_id:\n if self.version < pkg_resources.parse_version('0.12.0'):\n raise PyMapDVersionError(\n 'Must have pymapd > 0.12 to use session ID'\n )\n self.con = pymapd.connect(\n uri=uri,\n host=host,\n port=port,\n protocol=protocol,\n sessionid=session_id,\n )\n else:\n self.con = pymapd.connect(\n uri=uri,\n user=user,\n password=password,\n host=host,\n port=port,\n dbname=database,\n protocol=protocol,\n )\n\n def __del__(self):\n \"\"\"Close the connection when instance is deleted.\"\"\"\n self.close()\n\n def __enter__(self, **kwargs):\n \"\"\"Update internal attributes when using `with` statement.\"\"\"\n self.__dict__.update(**kwargs)\n return self\n\n def __exit__(self, *args):\n \"\"\"Close the connection when exits the `with` statement.\"\"\"\n self.close()\n\n def log(self, msg: str):\n \"\"\"Print or log a message.\n\n Parameters\n ----------\n msg : string\n \"\"\"\n log(msg)\n\n def close(self):\n \"\"\"Close OmniSciDB connection and drop any temporary objects.\"\"\"\n self.con.close()\n\n def _adapt_types(self, descr):\n names = []\n adapted_types = []\n for col in descr:\n names.append(col.name)\n adapted_types.append(\n OmniSciDBDataType._omniscidb_to_ibis_dtypes[col.type]\n )\n return names, adapted_types\n\n def _build_ast(self, expr, context):\n result = build_ast(expr, context)\n return result\n\n def _fully_qualified_name(self, name, database):\n # OmniSciDB raises error sometimes with qualified names\n return name\n\n def _get_list(self, cur):\n tuples = cur.cursor.fetchall()\n return [v[0] for v in tuples]\n\n def _get_schema_using_query(self, query):\n with self._execute(query, results=True) as result:\n # resets the state of the cursor and closes operation\n result.cursor.fetchall()\n names, ibis_types = self._adapt_types(\n _extract_column_details(result.cursor._result.row_set.row_desc)\n )\n\n return sch.Schema(names, ibis_types)\n\n def _get_schema_using_validator(self, query):\n result = self.con._client.sql_validate(self.con._session, query)\n return sch.Schema.from_tuples(\n (\n r,\n OmniSciDBDataType._omniscidb_to_ibis_dtypes[\n pymapd_dtype._VALUES_TO_NAMES[result[r].col_type.type]\n ],\n )\n for r in result\n )\n\n def _get_table_schema(self, table_name, database=None):\n \"\"\"Get table schema.\n\n Parameters\n ----------\n table_name : str\n database : str\n\n Returns\n -------\n schema : ibis Schema\n \"\"\"\n 
table_name_ = table_name.split('.')\n if len(table_name_) == 2:\n database, table_name = table_name_\n return self.get_schema(table_name, database)\n\n def _execute(self, query, results=True):\n \"\"\"Execute a query.\n\n Paramters\n ---------\n query : DDL or DML or string\n\n Returns\n -------\n result : pandas.DataFrame\n\n Raises\n ------\n Exception\n if execution method fails.\n \"\"\"\n if isinstance(query, (DDL, DML)):\n query = query.compile()\n\n if self.execution_type == EXECUTION_TYPE_ICP:\n execute = self.con.select_ipc\n elif self.execution_type == EXECUTION_TYPE_ICP_GPU:\n execute = self.con.select_ipc_gpu\n else:\n execute = self.con.cursor().execute\n\n cursor = (\n OmniSciDBGeoCursor\n if FULL_GEO_SUPPORTED\n else OmniSciDBDefaultCursor\n )\n\n try:\n result = cursor(execute(query))\n except Exception as e:\n raise Exception('{}: {}'.format(e, query))\n\n if results:\n return result\n\n def create_database(self, name, owner=None):\n \"\"\"\n Create a new OmniSciDB database.\n\n Parameters\n ----------\n name : string\n Database name\n \"\"\"\n statement = ddl.CreateDatabase(name, owner=owner)\n self._execute(statement)\n\n def describe_formatted(self, name: str) -> pd.DataFrame:\n \"\"\"Describe a given table name.\n\n Parameters\n ----------\n name : string\n\n Returns\n -------\n pandas.DataFrame\n \"\"\"\n return pd.DataFrame(\n [\n (\n col.name,\n OmniSciDBDataType.parse(col.type),\n col.precision,\n col.scale,\n col.comp_param,\n col.encoding,\n )\n for col in self.con.get_table_details(name)\n ],\n columns=[\n 'column_name',\n 'type',\n 'precision',\n 'scale',\n 'comp_param',\n 'encoding',\n ],\n )\n\n def drop_database(self, name, force=False):\n \"\"\"\n Drop an OmniSciDB database.\n\n Parameters\n ----------\n name : string\n Database name\n force : boolean, default False\n If False and there are any tables in this database, raises an\n IntegrityError\n\n Raises\n ------\n ibis.common.exceptions.IntegrityError\n if given database has tables and force is not define as True\n \"\"\"\n tables = []\n\n if not force or self.database(name):\n tables = self.list_tables(database=name)\n\n if not force and len(tables):\n raise com.IntegrityError(\n 'Database {0} must be empty before being dropped, or set '\n 'force=True'.format(name)\n )\n statement = ddl.DropDatabase(name)\n self._execute(statement)\n\n def create_user(self, name, password, is_super=False):\n \"\"\"\n Create a new OmniSciDB user.\n\n Parameters\n ----------\n name : string\n User name\n password : string\n Password\n is_super : bool\n if user is a superuser\n \"\"\"\n statement = ddl.CreateUser(\n name=name, password=password, is_super=is_super\n )\n self._execute(statement)\n\n def alter_user(\n self, name, password=None, is_super=None, insert_access=None\n ):\n \"\"\"\n Alter OmniSciDB user parameters.\n\n Parameters\n ----------\n name : string\n User name\n password : string\n Password\n is_super : bool\n If user is a superuser\n insert_access : string\n If users need to insert records to a database they do not own,\n use insert_access property to give them the required privileges.\n \"\"\"\n statement = ddl.AlterUser(\n name=name,\n password=password,\n is_super=is_super,\n insert_access=insert_access,\n )\n self._execute(statement)\n\n def drop_user(self, name):\n \"\"\"\n Drop a given user.\n\n Parameters\n ----------\n name : string\n User name\n \"\"\"\n statement = ddl.DropUser(name)\n self._execute(statement)\n\n def create_view(self, name, expr, database=None):\n \"\"\"\n Create a view with a 
given name from a table expression.\n\n Parameters\n ----------\n name : string\n expr : ibis TableExpr\n database : string, optional\n \"\"\"\n ast = self._build_ast(expr, OmniSciDBDialect.make_context())\n select = ast.queries[0]\n statement = ddl.CreateView(name, select, database=database)\n self._execute(statement)\n\n def drop_view(self, name, database=None):\n \"\"\"\n Drop a given view.\n\n Parameters\n ----------\n name : string\n database : string, default None\n \"\"\"\n statement = ddl.DropView(name, database=database)\n self._execute(statement, False)\n\n def create_table(\n self, table_name, obj=None, schema=None, database=None, max_rows=None\n ):\n \"\"\"\n Create a new table from an Ibis table expression.\n\n Parameters\n ----------\n table_name : string\n obj : TableExpr or pandas.DataFrame, optional\n If passed, creates table from select statement results\n schema : ibis.Schema, optional\n Mutually exclusive with expr, creates an empty table with a\n particular schema\n database : string, optional\n max_rows : int, optional\n Set the maximum number of rows allowed in a table to create a capped\n collection. When this limit is reached, the oldest fragment is\n removed. Default = 2^62.\n\n Examples\n --------\n >>> con.create_table('new_table_name', table_expr) # doctest: +SKIP\n \"\"\"\n _database = self.db_name\n self.set_database(database)\n\n if obj is not None:\n if isinstance(obj, pd.DataFrame):\n raise NotImplementedError(\n 'Pandas Data Frame input not implemented.'\n )\n else:\n to_insert = obj\n ast = self._build_ast(to_insert, OmniSciDBDialect.make_context())\n select = ast.queries[0]\n\n statement = ddl.CTAS(table_name, select, database=database)\n elif schema is not None:\n statement = ddl.CreateTableWithSchema(\n table_name, schema, database=database, max_rows=max_rows\n )\n else:\n raise com.IbisError('Must pass expr or schema')\n\n self._execute(statement, False)\n self.set_database(_database)\n\n def drop_table(self, table_name, database=None, force=False):\n \"\"\"\n Drop a given table.\n\n Parameters\n ----------\n table_name : string\n database : string, default None (optional)\n force : boolean, default False\n Database may throw exception if table does not exist\n\n Examples\n --------\n >>> table = 'my_table'\n >>> db = 'operations'\n >>> con.drop_table(table, database=db, force=True) # doctest: +SKIP\n \"\"\"\n _database = self.db_name\n self.set_database(database)\n\n statement = ddl.DropTable(\n table_name, database=database, must_exist=not force\n )\n self._execute(statement, False)\n self.set_database(_database)\n\n def truncate_table(self, table_name, database=None):\n \"\"\"\n Delete all rows from, but do not drop, an existing table.\n\n Parameters\n ----------\n table_name : string\n database : string, optional\n \"\"\"\n statement = ddl.TruncateTable(table_name, database=database)\n self._execute(statement, False)\n\n def drop_table_or_view(\n self, name: str, database: str = None, force: bool = False\n ):\n \"\"\"Attempt to drop a relation that may be a view or table.\n\n Parameters\n ----------\n name : str\n database : str, optional\n force : bool, optional\n\n Raises\n ------\n Exception\n if the drop operation fails.\n \"\"\"\n try:\n self.drop_table(name, database=database)\n except Exception as e:\n try:\n self.drop_view(name, database=database)\n except Exception:\n raise e\n\n def database(self, name=None):\n \"\"\"Connect to a given database.\n\n Parameters\n ----------\n name : str, optional\n The name of the database to connect to. 
If ``None``, return\n the database named ``self.current_database``.\n\n Returns\n -------\n db : Database\n An :class:`ibis.client.Database` instance.\n\n Notes\n -----\n This creates a new connection if `name` is both not ``None`` and not\n equal to the current database.\n \"\"\"\n if name == self.current_database or name is None:\n return self.database_class(self.current_database, self)\n else:\n client_class = type(self)\n new_client = client_class(\n uri=self.uri,\n user=self.user,\n password=self.password,\n host=self.host,\n port=self.port,\n database=name,\n protocol=self.protocol,\n session_id=self.session_id,\n execution_type=self.execution_type,\n )\n return self.database_class(name, new_client)\n\n def load_data(self, table_name, obj, database=None, **kwargs):\n \"\"\"Load data into a given table.\n\n Wraps the LOAD DATA DDL statement. Loads data into an OmniSciDB table\n by physically moving data files.\n\n Parameters\n ----------\n table_name : string\n obj: pandas.DataFrame or pyarrow.Table\n database : string, optional\n \"\"\"\n _database = self.db_name\n self.set_database(database)\n self.con.load_table(table_name, obj, **kwargs)\n self.set_database(_database)\n\n @property\n def current_database(self):\n \"\"\"Get the current database name.\"\"\"\n return self.db_name\n\n def set_database(self, name: str):\n \"\"\"Set a given database for the current connect.\n\n Parameters\n ----------\n name : string\n \"\"\"\n if self.db_name != name and name is not None:\n self.con.close()\n self.con = pymapd.connect(\n uri=self.uri,\n user=self.user,\n password=self.password,\n host=self.host,\n port=self.port,\n dbname=name,\n protocol=self.protocol,\n sessionid=self.session_id,\n )\n self.db_name = name\n\n @com.mark_as_unsupported\n def exists_database(self, name: str):\n \"\"\"Check if the given database exists.\n\n Parameters\n ----------\n name : str\n\n Raises\n ------\n NotImplementedError\n Method not supported yet.\n \"\"\"\n\n @com.mark_as_unsupported\n def list_databases(self, like: str = None):\n \"\"\"List all databases.\n\n Parameters\n ----------\n like : str, optional\n\n Raises\n ------\n NotImplementedError\n Method not supported yet.\n \"\"\"\n\n def exists_table(self, name: str, database: str = None):\n \"\"\"\n Determine if the indicated table or view exists.\n\n Parameters\n ----------\n name : string\n database : string, default None\n\n Returns\n -------\n if_exists : boolean\n \"\"\"\n return bool(self.list_tables(like=name, database=database))\n\n def list_tables(self, like: str = None, database: str = None) -> list:\n \"\"\"List all tables inside given or current database.\n\n Parameters\n ----------\n like : str, optional\n database : str, optional\n\n Returns\n -------\n list\n \"\"\"\n _database = None\n\n if not self.db_name == database:\n _database = self.db_name\n self.set_database(database)\n\n tables = self.con.get_tables()\n\n if _database:\n self.set_database(_database)\n\n if like is None:\n return tables\n pattern = re.compile(like)\n return list(filter(lambda t: pattern.findall(t), tables))\n\n def get_schema(self, table_name, database=None):\n \"\"\"\n Return a Schema object for the given table and database.\n\n Parameters\n ----------\n table_name : string\n May be fully qualified\n database : string, default None\n\n Returns\n -------\n schema : ibis Schema\n \"\"\"\n col_names = []\n col_types = []\n\n for col in self.con.get_table_details(table_name):\n col_names.append(col.name)\n col_types.append(OmniSciDBDataType.parse(col.type))\n\n 
return sch.schema(\n [\n (col.name, OmniSciDBDataType.parse(col.type))\n for col in self.con.get_table_details(table_name)\n ]\n )\n\n def sql(self, query: str):\n \"\"\"\n Convert a SQL query to an Ibis table expression.\n\n Parameters\n ----------\n query : string\n\n Returns\n -------\n table : TableExpr\n \"\"\"\n # Remove `;` + `--` (comment)\n query = re.sub(r'\\s*;\\s*--', '\\n--', query.strip())\n # Remove trailing ;\n query = re.sub(r'\\s*;\\s*$', '', query.strip())\n schema = self._get_schema_using_validator(query)\n return ops.SQLQueryResult(query, schema, self).to_expr()\n\n @property\n def version(self):\n \"\"\"Return the backend library version.\n\n Returns\n -------\n string\n Version of the backend library.\n \"\"\"\n # pymapd doesn't have __version__\n dist = pkg_resources.get_distribution('pymapd')\n return pkg_resources.parse_version(dist.version)\n\n\[email protected](OmniSciDBDataType)\ndef omniscidb_to_ibis_dtype(omniscidb_dtype):\n \"\"\"\n Register OmniSciDB Data Types.\n\n Parameters\n ----------\n omniscidb_dtype : OmniSciDBDataType\n\n Returns\n -------\n ibis.expr.datatypes.DataType\n \"\"\"\n return omniscidb_dtype.to_ibis()\n",
"path": "ibis/omniscidb/client.py"
}
] | diff --git a/docs/source/release.rst b/docs/source/release.rst
index 52efa4d0293c..7a58fd952f3d 100644
--- a/docs/source/release.rst
+++ b/docs/source/release.rst
@@ -9,6 +9,7 @@ Release Notes
* :feature:`2048` Introduce a top level vectorized UDF module (experimental). Implement element-wise UDF for pandas and PySpark backend.
* :release:`1.2.1 <pending>`
+* :bug:`2055` Fix "cudf" import on OmniSciDB backend
* :support:`2034` Add initial documentation for OmniSciDB, MySQL, PySpark and SparkSQL backends, add initial documentation for geospatial methods and add links to Ibis wiki page
* :bug:`2050` CI: Drop table only if it exists
* :feature:`2044` Implement covariance for bigquery backend
diff --git a/ibis/omniscidb/client.py b/ibis/omniscidb/client.py
index cd646a6c9d18..ce275ee159a2 100644
--- a/ibis/omniscidb/client.py
+++ b/ibis/omniscidb/client.py
@@ -20,7 +20,7 @@
try:
from cudf.dataframe.dataframe import DataFrame as GPUDataFrame
-except ImportError:
+except (ImportError, OSError):
GPUDataFrame = None
# used to check if geopandas and shapely is available
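
The one-line fix in the hunk above broadens a guarded optional import: `cudf` can fail to import not only with `ImportError` (package absent) but also with `OSError` (for example, the package is installed but a native CUDA library fails to load). A minimal, self-contained sketch of the resulting pattern is below; the `is_gpu_frame` helper is illustrative and not part of the backend:

```python
# Guarded optional dependency: fall back to a None sentinel when the
# GPU stack is unusable, and branch on that sentinel at call sites.
try:
    from cudf.dataframe.dataframe import DataFrame as GPUDataFrame
except (ImportError, OSError):
    # ImportError: cudf is not installed at all.
    # OSError: cudf is installed but a native library (e.g. libcuda)
    # fails to load at import time.
    GPUDataFrame = None


def is_gpu_frame(obj) -> bool:
    """Return True only when cudf imported cleanly and obj is a GPU frame."""
    return GPUDataFrame is not None and isinstance(obj, GPUDataFrame)
```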
|
pyca__cryptography-591 | Bind X509_verify_cert_error_string
pyOpenSSL 0.14 needs this. https://github.com/pyca/pyopenssl/issues/30
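
Once this symbol is declared in the bindings' `FUNCTIONS` block, it becomes callable through the compiled cffi library object. A minimal sketch of what a consumer such as pyOpenSSL can then do, assuming the declaration has been built into the bindings; the error code `0` (`X509_V_OK`) is just an illustrative input:

```python
from cryptography.hazmat.bindings.openssl.binding import Binding

binding = Binding()

# X509_verify_cert_error_string() returns a static `const char *`;
# ffi.string() copies it out as Python bytes. 0 means X509_V_OK.
msg = binding.ffi.string(binding.lib.X509_verify_cert_error_string(0))
print(msg)  # b'ok'
```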
| [
{
"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n# implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nINCLUDES = \"\"\"\n#include <openssl/ssl.h>\n\n/*\n * This is part of a work-around for the difficulty cffi has in dealing with\n * `STACK_OF(foo)` as the name of a type. We invent a new, simpler name that\n * will be an alias for this type and use the alias throughout. This works\n * together with another opaque typedef for the same name in the TYPES section.\n * Note that the result is an opaque type.\n */\ntypedef STACK_OF(X509) Cryptography_STACK_OF_X509;\ntypedef STACK_OF(X509_REVOKED) Cryptography_STACK_OF_X509_REVOKED;\n\"\"\"\n\nTYPES = \"\"\"\ntypedef ... Cryptography_STACK_OF_X509;\ntypedef ... Cryptography_STACK_OF_X509_REVOKED;\n\ntypedef struct {\n ASN1_OBJECT *algorithm;\n ...;\n} X509_ALGOR;\n\ntypedef struct {\n X509_ALGOR *signature;\n ...;\n} X509_CINF;\n\ntypedef struct {\n ASN1_OBJECT *object;\n ASN1_BOOLEAN critical;\n ASN1_OCTET_STRING *value;\n} X509_EXTENSION;\n\ntypedef ... X509_EXTENSIONS;\n\ntypedef ... X509_REQ;\n\ntypedef struct {\n ASN1_INTEGER *serialNumber;\n ASN1_TIME *revocationDate;\n X509_EXTENSIONS *extensions;\n int sequence;\n ...;\n} X509_REVOKED;\n\ntypedef struct {\n Cryptography_STACK_OF_X509_REVOKED *revoked;\n ...;\n} X509_CRL_INFO;\n\ntypedef struct {\n X509_CRL_INFO *crl;\n ...;\n} X509_CRL;\n\ntypedef struct {\n X509_CINF *cert_info;\n ...;\n} X509;\n\ntypedef ... X509_STORE;\ntypedef ... 
NETSCAPE_SPKI;\n\"\"\"\n\nFUNCTIONS = \"\"\"\nX509 *X509_new(void);\nvoid X509_free(X509 *);\nX509 *X509_dup(X509 *);\n\nint X509_print_ex(BIO *, X509 *, unsigned long, unsigned long);\n\nint X509_set_version(X509 *, long);\n\nEVP_PKEY *X509_get_pubkey(X509 *);\nint X509_set_pubkey(X509 *, EVP_PKEY *);\n\nunsigned char *X509_alias_get0(X509 *, int *);\nint X509_sign(X509 *, EVP_PKEY *, const EVP_MD *);\n\nint X509_digest(const X509 *, const EVP_MD *, unsigned char *, unsigned int *);\n\nASN1_TIME *X509_gmtime_adj(ASN1_TIME *, long);\n\nunsigned long X509_subject_name_hash(X509 *);\n\nX509_NAME *X509_get_subject_name(X509 *);\nint X509_set_subject_name(X509 *, X509_NAME *);\n\nX509_NAME *X509_get_issuer_name(X509 *);\nint X509_set_issuer_name(X509 *, X509_NAME *);\n\nint X509_get_ext_count(X509 *);\nint X509_add_ext(X509 *, X509_EXTENSION *, int);\nX509_EXTENSION *X509_EXTENSION_dup(X509_EXTENSION *);\nX509_EXTENSION *X509_get_ext(X509 *, int);\nint X509_EXTENSION_get_critical(X509_EXTENSION *);\nASN1_OBJECT *X509_EXTENSION_get_object(X509_EXTENSION *);\nvoid X509_EXTENSION_free(X509_EXTENSION *);\n\nint X509_REQ_set_version(X509_REQ *, long);\nX509_REQ *X509_REQ_new(void);\nvoid X509_REQ_free(X509_REQ *);\nint X509_REQ_set_pubkey(X509_REQ *, EVP_PKEY *);\nint X509_REQ_sign(X509_REQ *, EVP_PKEY *, const EVP_MD *);\nint X509_REQ_verify(X509_REQ *, EVP_PKEY *);\nEVP_PKEY *X509_REQ_get_pubkey(X509_REQ *);\nint X509_REQ_add_extensions(X509_REQ *, X509_EXTENSIONS *);\nX509_EXTENSIONS *X509_REQ_get_extensions(X509_REQ *);\nint X509_REQ_print_ex(BIO *, X509_REQ *, unsigned long, unsigned long);\n\nint X509V3_EXT_print(BIO *, X509_EXTENSION *, unsigned long, int);\nASN1_OCTET_STRING *X509_EXTENSION_get_data(X509_EXTENSION *);\n\nX509_REVOKED *X509_REVOKED_new(void);\nvoid X509_REVOKED_free(X509_REVOKED *);\n\nint X509_REVOKED_set_serialNumber(X509_REVOKED *, ASN1_INTEGER *);\n\nint X509_REVOKED_add1_ext_i2d(X509_REVOKED *, int, void *, int, unsigned long);\n\nX509_CRL *d2i_X509_CRL_bio(BIO *, X509_CRL **);\nX509_CRL *X509_CRL_new(void);\nvoid X509_CRL_free(X509_CRL *);\nint X509_CRL_add0_revoked(X509_CRL *, X509_REVOKED *);\nint i2d_X509_CRL_bio(BIO *, X509_CRL *);\nint X509_CRL_print(BIO *, X509_CRL *);\nint X509_CRL_set_issuer_name(X509_CRL *, X509_NAME *);\nint X509_CRL_sign(X509_CRL *, EVP_PKEY *, const EVP_MD *);\n\nint NETSCAPE_SPKI_verify(NETSCAPE_SPKI *, EVP_PKEY *);\nint NETSCAPE_SPKI_sign(NETSCAPE_SPKI *, EVP_PKEY *, const EVP_MD *);\nchar *NETSCAPE_SPKI_b64_encode(NETSCAPE_SPKI *);\nEVP_PKEY *NETSCAPE_SPKI_get_pubkey(NETSCAPE_SPKI *);\nint NETSCAPE_SPKI_set_pubkey(NETSCAPE_SPKI *, EVP_PKEY *);\nNETSCAPE_SPKI *NETSCAPE_SPKI_new(void);\nvoid NETSCAPE_SPKI_free(NETSCAPE_SPKI *);\n\n/* ASN1 serialization */\nint i2d_X509_bio(BIO *, X509 *);\nX509 *d2i_X509_bio(BIO *, X509 **);\n\nint i2d_X509_REQ_bio(BIO *, X509_REQ *);\nX509_REQ *d2i_X509_REQ_bio(BIO *, X509_REQ **);\n\nint i2d_PrivateKey_bio(BIO *, EVP_PKEY *);\nEVP_PKEY *d2i_PrivateKey_bio(BIO *, EVP_PKEY **);\n\nASN1_INTEGER *X509_get_serialNumber(X509 *);\nint X509_set_serialNumber(X509 *, ASN1_INTEGER *);\n\n/* X509_STORE */\nX509_STORE *X509_STORE_new(void);\nvoid X509_STORE_free(X509_STORE *);\nint X509_STORE_add_cert(X509_STORE *, X509 *);\nint X509_verify_cert(X509_STORE_CTX *);\n\"\"\"\n\nMACROS = \"\"\"\nlong X509_get_version(X509 *);\n\nASN1_TIME *X509_get_notBefore(X509 *);\nASN1_TIME *X509_get_notAfter(X509 *);\n\nlong X509_REQ_get_version(X509_REQ *);\nX509_NAME *X509_REQ_get_subject_name(X509_REQ 
*);\n\nCryptography_STACK_OF_X509 *sk_X509_new_null(void);\nvoid sk_X509_free(Cryptography_STACK_OF_X509 *);\nint sk_X509_num(Cryptography_STACK_OF_X509 *);\nint sk_X509_push(Cryptography_STACK_OF_X509 *, X509 *);\nX509 *sk_X509_value(Cryptography_STACK_OF_X509 *, int);\n\nX509_EXTENSIONS *sk_X509_EXTENSION_new_null(void);\nint sk_X509_EXTENSION_num(X509_EXTENSIONS *);\nX509_EXTENSION *sk_X509_EXTENSION_value(X509_EXTENSIONS *, int);\nint sk_X509_EXTENSION_push(X509_EXTENSIONS *, X509_EXTENSION *);\nX509_EXTENSION *sk_X509_EXTENSION_delete(X509_EXTENSIONS *, int);\nvoid sk_X509_EXTENSION_free(X509_EXTENSIONS *);\n\nint sk_X509_REVOKED_num(Cryptography_STACK_OF_X509_REVOKED *);\nX509_REVOKED *sk_X509_REVOKED_value(Cryptography_STACK_OF_X509_REVOKED *, int);\n\n/* These aren't macros these arguments are all const X on openssl > 1.0.x */\nint X509_CRL_set_lastUpdate(X509_CRL *, ASN1_TIME *);\nint X509_CRL_set_nextUpdate(X509_CRL *, ASN1_TIME *);\n\"\"\"\n\nCUSTOMIZATIONS = \"\"\"\n\"\"\"\n\nCONDITIONAL_NAMES = {}\n",
"path": "cryptography/hazmat/bindings/openssl/x509.py"
}
] | [
{
"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n# implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nINCLUDES = \"\"\"\n#include <openssl/ssl.h>\n\n/*\n * This is part of a work-around for the difficulty cffi has in dealing with\n * `STACK_OF(foo)` as the name of a type. We invent a new, simpler name that\n * will be an alias for this type and use the alias throughout. This works\n * together with another opaque typedef for the same name in the TYPES section.\n * Note that the result is an opaque type.\n */\ntypedef STACK_OF(X509) Cryptography_STACK_OF_X509;\ntypedef STACK_OF(X509_REVOKED) Cryptography_STACK_OF_X509_REVOKED;\n\"\"\"\n\nTYPES = \"\"\"\ntypedef ... Cryptography_STACK_OF_X509;\ntypedef ... Cryptography_STACK_OF_X509_REVOKED;\n\ntypedef struct {\n ASN1_OBJECT *algorithm;\n ...;\n} X509_ALGOR;\n\ntypedef struct {\n X509_ALGOR *signature;\n ...;\n} X509_CINF;\n\ntypedef struct {\n ASN1_OBJECT *object;\n ASN1_BOOLEAN critical;\n ASN1_OCTET_STRING *value;\n} X509_EXTENSION;\n\ntypedef ... X509_EXTENSIONS;\n\ntypedef ... X509_REQ;\n\ntypedef struct {\n ASN1_INTEGER *serialNumber;\n ASN1_TIME *revocationDate;\n X509_EXTENSIONS *extensions;\n int sequence;\n ...;\n} X509_REVOKED;\n\ntypedef struct {\n Cryptography_STACK_OF_X509_REVOKED *revoked;\n ...;\n} X509_CRL_INFO;\n\ntypedef struct {\n X509_CRL_INFO *crl;\n ...;\n} X509_CRL;\n\ntypedef struct {\n X509_CINF *cert_info;\n ...;\n} X509;\n\ntypedef ... X509_STORE;\ntypedef ... 
NETSCAPE_SPKI;\n\"\"\"\n\nFUNCTIONS = \"\"\"\nX509 *X509_new(void);\nvoid X509_free(X509 *);\nX509 *X509_dup(X509 *);\n\nint X509_print_ex(BIO *, X509 *, unsigned long, unsigned long);\n\nint X509_set_version(X509 *, long);\n\nEVP_PKEY *X509_get_pubkey(X509 *);\nint X509_set_pubkey(X509 *, EVP_PKEY *);\n\nunsigned char *X509_alias_get0(X509 *, int *);\nint X509_sign(X509 *, EVP_PKEY *, const EVP_MD *);\n\nint X509_digest(const X509 *, const EVP_MD *, unsigned char *, unsigned int *);\n\nASN1_TIME *X509_gmtime_adj(ASN1_TIME *, long);\n\nunsigned long X509_subject_name_hash(X509 *);\n\nX509_NAME *X509_get_subject_name(X509 *);\nint X509_set_subject_name(X509 *, X509_NAME *);\n\nX509_NAME *X509_get_issuer_name(X509 *);\nint X509_set_issuer_name(X509 *, X509_NAME *);\n\nint X509_get_ext_count(X509 *);\nint X509_add_ext(X509 *, X509_EXTENSION *, int);\nX509_EXTENSION *X509_EXTENSION_dup(X509_EXTENSION *);\nX509_EXTENSION *X509_get_ext(X509 *, int);\nint X509_EXTENSION_get_critical(X509_EXTENSION *);\nASN1_OBJECT *X509_EXTENSION_get_object(X509_EXTENSION *);\nvoid X509_EXTENSION_free(X509_EXTENSION *);\n\nint X509_REQ_set_version(X509_REQ *, long);\nX509_REQ *X509_REQ_new(void);\nvoid X509_REQ_free(X509_REQ *);\nint X509_REQ_set_pubkey(X509_REQ *, EVP_PKEY *);\nint X509_REQ_sign(X509_REQ *, EVP_PKEY *, const EVP_MD *);\nint X509_REQ_verify(X509_REQ *, EVP_PKEY *);\nEVP_PKEY *X509_REQ_get_pubkey(X509_REQ *);\nint X509_REQ_add_extensions(X509_REQ *, X509_EXTENSIONS *);\nX509_EXTENSIONS *X509_REQ_get_extensions(X509_REQ *);\nint X509_REQ_print_ex(BIO *, X509_REQ *, unsigned long, unsigned long);\n\nint X509V3_EXT_print(BIO *, X509_EXTENSION *, unsigned long, int);\nASN1_OCTET_STRING *X509_EXTENSION_get_data(X509_EXTENSION *);\n\nX509_REVOKED *X509_REVOKED_new(void);\nvoid X509_REVOKED_free(X509_REVOKED *);\n\nint X509_REVOKED_set_serialNumber(X509_REVOKED *, ASN1_INTEGER *);\n\nint X509_REVOKED_add1_ext_i2d(X509_REVOKED *, int, void *, int, unsigned long);\n\nX509_CRL *d2i_X509_CRL_bio(BIO *, X509_CRL **);\nX509_CRL *X509_CRL_new(void);\nvoid X509_CRL_free(X509_CRL *);\nint X509_CRL_add0_revoked(X509_CRL *, X509_REVOKED *);\nint i2d_X509_CRL_bio(BIO *, X509_CRL *);\nint X509_CRL_print(BIO *, X509_CRL *);\nint X509_CRL_set_issuer_name(X509_CRL *, X509_NAME *);\nint X509_CRL_sign(X509_CRL *, EVP_PKEY *, const EVP_MD *);\n\nint NETSCAPE_SPKI_verify(NETSCAPE_SPKI *, EVP_PKEY *);\nint NETSCAPE_SPKI_sign(NETSCAPE_SPKI *, EVP_PKEY *, const EVP_MD *);\nchar *NETSCAPE_SPKI_b64_encode(NETSCAPE_SPKI *);\nEVP_PKEY *NETSCAPE_SPKI_get_pubkey(NETSCAPE_SPKI *);\nint NETSCAPE_SPKI_set_pubkey(NETSCAPE_SPKI *, EVP_PKEY *);\nNETSCAPE_SPKI *NETSCAPE_SPKI_new(void);\nvoid NETSCAPE_SPKI_free(NETSCAPE_SPKI *);\n\n/* ASN1 serialization */\nint i2d_X509_bio(BIO *, X509 *);\nX509 *d2i_X509_bio(BIO *, X509 **);\n\nint i2d_X509_REQ_bio(BIO *, X509_REQ *);\nX509_REQ *d2i_X509_REQ_bio(BIO *, X509_REQ **);\n\nint i2d_PrivateKey_bio(BIO *, EVP_PKEY *);\nEVP_PKEY *d2i_PrivateKey_bio(BIO *, EVP_PKEY **);\n\nASN1_INTEGER *X509_get_serialNumber(X509 *);\nint X509_set_serialNumber(X509 *, ASN1_INTEGER *);\n\n/* X509_STORE */\nX509_STORE *X509_STORE_new(void);\nvoid X509_STORE_free(X509_STORE *);\nint X509_STORE_add_cert(X509_STORE *, X509 *);\nint X509_verify_cert(X509_STORE_CTX *);\n\nconst char *X509_verify_cert_error_string(long);\n\"\"\"\n\nMACROS = \"\"\"\nlong X509_get_version(X509 *);\n\nASN1_TIME *X509_get_notBefore(X509 *);\nASN1_TIME *X509_get_notAfter(X509 *);\n\nlong X509_REQ_get_version(X509_REQ *);\nX509_NAME 
*X509_REQ_get_subject_name(X509_REQ *);\n\nCryptography_STACK_OF_X509 *sk_X509_new_null(void);\nvoid sk_X509_free(Cryptography_STACK_OF_X509 *);\nint sk_X509_num(Cryptography_STACK_OF_X509 *);\nint sk_X509_push(Cryptography_STACK_OF_X509 *, X509 *);\nX509 *sk_X509_value(Cryptography_STACK_OF_X509 *, int);\n\nX509_EXTENSIONS *sk_X509_EXTENSION_new_null(void);\nint sk_X509_EXTENSION_num(X509_EXTENSIONS *);\nX509_EXTENSION *sk_X509_EXTENSION_value(X509_EXTENSIONS *, int);\nint sk_X509_EXTENSION_push(X509_EXTENSIONS *, X509_EXTENSION *);\nX509_EXTENSION *sk_X509_EXTENSION_delete(X509_EXTENSIONS *, int);\nvoid sk_X509_EXTENSION_free(X509_EXTENSIONS *);\n\nint sk_X509_REVOKED_num(Cryptography_STACK_OF_X509_REVOKED *);\nX509_REVOKED *sk_X509_REVOKED_value(Cryptography_STACK_OF_X509_REVOKED *, int);\n\n/* These aren't macros these arguments are all const X on openssl > 1.0.x */\nint X509_CRL_set_lastUpdate(X509_CRL *, ASN1_TIME *);\nint X509_CRL_set_nextUpdate(X509_CRL *, ASN1_TIME *);\n\"\"\"\n\nCUSTOMIZATIONS = \"\"\"\n\"\"\"\n\nCONDITIONAL_NAMES = {}\n",
"path": "cryptography/hazmat/bindings/openssl/x509.py"
}
] | diff --git a/cryptography/hazmat/bindings/openssl/x509.py b/cryptography/hazmat/bindings/openssl/x509.py
index e4021a12c7f2..74259b3d5e27 100644
--- a/cryptography/hazmat/bindings/openssl/x509.py
+++ b/cryptography/hazmat/bindings/openssl/x509.py
@@ -167,6 +167,8 @@
void X509_STORE_free(X509_STORE *);
int X509_STORE_add_cert(X509_STORE *, X509 *);
int X509_verify_cert(X509_STORE_CTX *);
+
+const char *X509_verify_cert_error_string(long);
"""
MACROS = """
|
cisagov__manage.get.gov-1618 | Content Review: Request flow pages
### Issue description
Review the pages identified within the following sections of [this spreadsheet](https://docs.google.com/spreadsheets/d/18wwmEioSr6BU9Y6G6ihqzS7P1OaE65Ifoipg9-Gxcjo/edit#gid=1246252690) (under the "Registrar" tab):
- Domain request
- Error pages
While reviewing those sections, also review the content that appears on any linked pages that lead to beta.get.gov. For example, the "Authorizing Official" page links to a corresponding page on beta.get.gov. Review that content, as well, to ensure the information is accurate.
Use the [Content Review Checklist](https://docs.google.com/document/d/13JrJZOAQCkt-G5zKiIzBoNg_4ZJm_-FxGH2aN2fS86s/edit#heading=h.41lgwrhbhke5) as a guide for doing the review.
### Acceptance criteria
- [x] All pages identified for "Domain request" and "Error pages" have been reviewed and updates have been made in GitHub.
- [x] All linked pages that lead to beta.get.gov have also been reviewed and updates have been made in GitHub
- [x] The status for each reviewed page is updated in [this spreadsheet](https://docs.google.com/spreadsheets/d/18wwmEioSr6BU9Y6G6ihqzS7P1OaE65Ifoipg9-Gxcjo/edit#gid=1246252690) to indicate the outcome of the review.
- [x] Any dev tickets identified during the review have been created.
### Additional context
_No response_
### Links to other issues
URLs for this content review will be gathered via ticket #1244.
| [
{
"content": "from __future__ import annotations # allows forward references in annotations\nfrom itertools import zip_longest\nimport logging\nfrom typing import Callable\nfrom phonenumber_field.formfields import PhoneNumberField # type: ignore\n\nfrom django import forms\nfrom django.core.validators import RegexValidator, MaxLengthValidator\nfrom django.utils.safestring import mark_safe\n\nfrom api.views import DOMAIN_API_MESSAGES\n\nfrom registrar.models import Contact, DomainApplication, DraftDomain, Domain\nfrom registrar.templatetags.url_helpers import public_site_url\nfrom registrar.utility import errors\n\nlogger = logging.getLogger(__name__)\n\n\nclass RegistrarForm(forms.Form):\n \"\"\"\n A common set of methods and configuration.\n\n The registrar's domain application is several pages of \"steps\".\n Each step is an HTML form containing one or more Django \"forms\".\n\n Subclass this class to create new forms.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n kwargs.setdefault(\"label_suffix\", \"\")\n # save a reference to an application object\n self.application = kwargs.pop(\"application\", None)\n super(RegistrarForm, self).__init__(*args, **kwargs)\n\n def to_database(self, obj: DomainApplication | Contact):\n \"\"\"\n Adds this form's cleaned data to `obj` and saves `obj`.\n\n Does nothing if form is not valid.\n \"\"\"\n if not self.is_valid():\n return\n for name, value in self.cleaned_data.items():\n setattr(obj, name, value)\n obj.save()\n\n @classmethod\n def from_database(cls, obj: DomainApplication | Contact | None):\n \"\"\"Returns a dict of form field values gotten from `obj`.\"\"\"\n if obj is None:\n return {}\n return {name: getattr(obj, name) for name in cls.declared_fields.keys()} # type: ignore\n\n\nclass RegistrarFormSet(forms.BaseFormSet):\n \"\"\"\n As with RegistrarForm, a common set of methods and configuration.\n\n Subclass this class to create new formsets.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n # save a reference to an application object\n self.application = kwargs.pop(\"application\", None)\n super(RegistrarFormSet, self).__init__(*args, **kwargs)\n # quick workaround to ensure that the HTML `required`\n # attribute shows up on required fields for any forms\n # in the formset which have data already (stated another\n # way: you can leave a form in the formset blank, but\n # if you opt to fill it out, you must fill it out _right_)\n for index in range(self.initial_form_count()):\n self.forms[index].use_required_attribute = True\n\n def should_delete(self, cleaned):\n \"\"\"Should this entry be deleted from the database?\"\"\"\n raise NotImplementedError\n\n def pre_update(self, db_obj, cleaned):\n \"\"\"Code to run before an item in the formset is saved.\"\"\"\n for key, value in cleaned.items():\n setattr(db_obj, key, value)\n\n def pre_create(self, db_obj, cleaned):\n \"\"\"Code to run before an item in the formset is created in the database.\"\"\"\n return cleaned\n\n def to_database(self, obj: DomainApplication):\n \"\"\"\n Adds this form's cleaned data to `obj` and saves `obj`.\n\n Does nothing if form is not valid.\n\n Hint: Subclass should call `self._to_database(...)`.\n \"\"\"\n raise NotImplementedError\n\n def _to_database(\n self,\n obj: DomainApplication,\n join: str,\n should_delete: Callable,\n pre_update: Callable,\n pre_create: Callable,\n ):\n \"\"\"\n Performs the actual work of saving.\n\n Has hooks such as `should_delete` and `pre_update` by which the\n subclass can control behavior. 
Add more hooks whenever needed.\n \"\"\"\n if not self.is_valid():\n return\n obj.save()\n\n query = getattr(obj, join).order_by(\"created_at\").all() # order matters\n\n # the use of `zip` pairs the forms in the formset with the\n # related objects gotten from the database -- there should always be\n # at least as many forms as database entries: extra forms means new\n # entries, but fewer forms is _not_ the correct way to delete items\n # (likely a client-side error or an attempt at data tampering)\n\n for db_obj, post_data in zip_longest(query, self.forms, fillvalue=None):\n cleaned = post_data.cleaned_data if post_data is not None else {}\n\n # matching database object exists, update it\n if db_obj is not None and cleaned:\n if should_delete(cleaned):\n db_obj.delete()\n continue\n else:\n pre_update(db_obj, cleaned)\n db_obj.save()\n\n # no matching database object, create it\n elif db_obj is None and cleaned:\n kwargs = pre_create(db_obj, cleaned)\n getattr(obj, join).create(**kwargs)\n\n @classmethod\n def on_fetch(cls, query):\n \"\"\"Code to run when fetching formset's objects from the database.\"\"\"\n return query.values()\n\n @classmethod\n def from_database(cls, obj: DomainApplication, join: str, on_fetch: Callable):\n \"\"\"Returns a dict of form field values gotten from `obj`.\"\"\"\n return on_fetch(getattr(obj, join).order_by(\"created_at\")) # order matters\n\n\nclass OrganizationTypeForm(RegistrarForm):\n organization_type = forms.ChoiceField(\n # use the long names in the application form\n choices=DomainApplication.OrganizationChoicesVerbose.choices,\n widget=forms.RadioSelect,\n error_messages={\"required\": \"Select the type of organization you represent.\"},\n )\n\n\nclass TribalGovernmentForm(RegistrarForm):\n federally_recognized_tribe = forms.BooleanField(\n label=\"Federally-recognized tribe \",\n required=False,\n )\n\n state_recognized_tribe = forms.BooleanField(\n label=\"State-recognized tribe \",\n required=False,\n )\n\n tribe_name = forms.CharField(\n label=\"What is the name of the tribe you represent?\",\n error_messages={\"required\": \"Enter the tribe you represent.\"},\n )\n\n def clean(self):\n \"\"\"Needs to be either state or federally recognized.\"\"\"\n if not (self.cleaned_data[\"federally_recognized_tribe\"] or self.cleaned_data[\"state_recognized_tribe\"]):\n raise forms.ValidationError(\n # no sec because we are using it to include an internal URL\n # into a link. There should be no user-facing input in the\n # HTML indicated here.\n mark_safe( # nosec\n \"You can’t complete this application yet. \"\n \"Only tribes recognized by the U.S. federal government \"\n \"or by a U.S. state government are eligible for .gov \"\n 'domains. Use our <a href=\"{}\">contact form</a> to '\n \"tell us more about your tribe and why you want a .gov \"\n \"domain. 
We’ll review your information and get back \"\n \"to you.\".format(public_site_url(\"contact\"))\n ),\n code=\"invalid\",\n )\n\n\nclass OrganizationFederalForm(RegistrarForm):\n federal_type = forms.ChoiceField(\n choices=DomainApplication.BranchChoices.choices,\n widget=forms.RadioSelect,\n error_messages={\"required\": (\"Select the part of the federal government your organization is in.\")},\n )\n\n\nclass OrganizationElectionForm(RegistrarForm):\n is_election_board = forms.NullBooleanField(\n widget=forms.RadioSelect(\n choices=[\n (True, \"Yes\"),\n (False, \"No\"),\n ],\n )\n )\n\n def clean_is_election_board(self):\n \"\"\"This box must be checked to proceed but offer a clear error.\"\"\"\n # already converted to a boolean\n is_election_board = self.cleaned_data[\"is_election_board\"]\n if is_election_board is None:\n raise forms.ValidationError(\n (\"Select “Yes” if you represent an election office. Select “No” if you don’t.\"),\n code=\"required\",\n )\n return is_election_board\n\n\nclass OrganizationContactForm(RegistrarForm):\n # for federal agencies we also want to know the top-level agency.\n federal_agency = forms.ChoiceField(\n label=\"Federal agency\",\n # not required because this field won't be filled out unless\n # it is a federal agency. Use clean to check programatically\n # if it has been filled in when required.\n required=False,\n choices=[(\"\", \"--Select--\")] + DomainApplication.AGENCY_CHOICES,\n )\n organization_name = forms.CharField(\n label=\"Organization name\",\n error_messages={\"required\": \"Enter the name of your organization.\"},\n )\n address_line1 = forms.CharField(\n label=\"Street address\",\n error_messages={\"required\": \"Enter the street address of your organization.\"},\n )\n address_line2 = forms.CharField(\n required=False,\n label=\"Street address line 2 (optional)\",\n )\n city = forms.CharField(\n label=\"City\",\n error_messages={\"required\": \"Enter the city where your organization is located.\"},\n )\n state_territory = forms.ChoiceField(\n label=\"State, territory, or military post\",\n choices=[(\"\", \"--Select--\")] + DomainApplication.StateTerritoryChoices.choices,\n error_messages={\n \"required\": (\"Select the state, territory, or military post where your organization is located.\")\n },\n )\n zipcode = forms.CharField(\n label=\"Zip code\",\n validators=[\n RegexValidator(\n \"^[0-9]{5}(?:-[0-9]{4})?$|^$\",\n message=\"Enter a zip code in the form of 12345 or 12345-6789.\",\n )\n ],\n )\n urbanization = forms.CharField(\n required=False,\n label=\"Urbanization (required for Puerto Rico only)\",\n )\n\n def clean_federal_agency(self):\n \"\"\"Require something to be selected when this is a federal agency.\"\"\"\n federal_agency = self.cleaned_data.get(\"federal_agency\", None)\n # need the application object to know if this is federal\n if self.application is None:\n # hmm, no saved application object?, default require the agency\n if not federal_agency:\n # no answer was selected\n raise forms.ValidationError(\n \"Select the federal agency your organization is in.\",\n code=\"required\",\n )\n if self.application.is_federal():\n if not federal_agency:\n # no answer was selected\n raise forms.ValidationError(\n \"Select the federal agency your organization is in.\",\n code=\"required\",\n )\n return federal_agency\n\n\nclass AboutYourOrganizationForm(RegistrarForm):\n about_your_organization = forms.CharField(\n label=\"About your organization\",\n widget=forms.Textarea(),\n validators=[\n MaxLengthValidator(\n 1000,\n 
message=\"Response must be less than 1000 characters.\",\n )\n ],\n error_messages={\"required\": (\"Enter more information about your organization.\")},\n )\n\n\nclass AuthorizingOfficialForm(RegistrarForm):\n def to_database(self, obj):\n if not self.is_valid():\n return\n contact = getattr(obj, \"authorizing_official\", None)\n if contact is not None:\n super().to_database(contact)\n else:\n contact = Contact()\n super().to_database(contact)\n obj.authorizing_official = contact\n obj.save()\n\n @classmethod\n def from_database(cls, obj):\n contact = getattr(obj, \"authorizing_official\", None)\n return super().from_database(contact)\n\n first_name = forms.CharField(\n label=\"First name / given name\",\n error_messages={\"required\": (\"Enter the first name / given name of your authorizing official.\")},\n )\n last_name = forms.CharField(\n label=\"Last name / family name\",\n error_messages={\"required\": (\"Enter the last name / family name of your authorizing official.\")},\n )\n title = forms.CharField(\n label=\"Title or role in your organization\",\n error_messages={\n \"required\": (\n \"Enter the title or role your authorizing official has in your\"\n \" organization (e.g., Chief Information Officer).\"\n )\n },\n )\n email = forms.EmailField(\n label=\"Email\",\n error_messages={\"invalid\": (\"Enter an email address in the required format, like [email protected].\")},\n )\n\n\nclass CurrentSitesForm(RegistrarForm):\n website = forms.URLField(\n required=False,\n label=\"Public website\",\n error_messages={\n \"invalid\": (\"Enter your organization's current website in the required format, like example.com.\")\n },\n )\n\n\nclass BaseCurrentSitesFormSet(RegistrarFormSet):\n JOIN = \"current_websites\"\n\n def should_delete(self, cleaned):\n website = cleaned.get(\"website\", \"\")\n return website.strip() == \"\"\n\n def to_database(self, obj: DomainApplication):\n self._to_database(obj, self.JOIN, self.should_delete, self.pre_update, self.pre_create)\n\n @classmethod\n def from_database(cls, obj):\n return super().from_database(obj, cls.JOIN, cls.on_fetch)\n\n\nCurrentSitesFormSet = forms.formset_factory(\n CurrentSitesForm,\n extra=1,\n absolute_max=1500, # django default; use `max_num` to limit entries\n formset=BaseCurrentSitesFormSet,\n)\n\n\nclass AlternativeDomainForm(RegistrarForm):\n def clean_alternative_domain(self):\n \"\"\"Validation code for domain names.\"\"\"\n try:\n requested = self.cleaned_data.get(\"alternative_domain\", None)\n validated = DraftDomain.validate(requested, blank_ok=True)\n except errors.ExtraDotsError:\n raise forms.ValidationError(DOMAIN_API_MESSAGES[\"extra_dots\"], code=\"extra_dots\")\n except errors.DomainUnavailableError:\n raise forms.ValidationError(DOMAIN_API_MESSAGES[\"unavailable\"], code=\"unavailable\")\n except errors.RegistrySystemError:\n raise forms.ValidationError(DOMAIN_API_MESSAGES[\"error\"], code=\"error\")\n except ValueError:\n raise forms.ValidationError(DOMAIN_API_MESSAGES[\"invalid\"], code=\"invalid\")\n return validated\n\n alternative_domain = forms.CharField(\n required=False,\n label=\"\",\n )\n\n\nclass BaseAlternativeDomainFormSet(RegistrarFormSet):\n JOIN = \"alternative_domains\"\n\n def should_delete(self, cleaned):\n domain = cleaned.get(\"alternative_domain\", \"\")\n return domain.strip() == \"\"\n\n def pre_update(self, db_obj, cleaned):\n domain = cleaned.get(\"alternative_domain\", None)\n if domain is not None:\n db_obj.website = f\"{domain}.gov\"\n\n def pre_create(self, db_obj, cleaned):\n domain 
= cleaned.get(\"alternative_domain\", None)\n if domain is not None:\n return {\"website\": f\"{domain}.gov\"}\n else:\n return {}\n\n def to_database(self, obj: DomainApplication):\n self._to_database(obj, self.JOIN, self.should_delete, self.pre_update, self.pre_create)\n\n @classmethod\n def on_fetch(cls, query):\n return [{\"alternative_domain\": Domain.sld(domain.website)} for domain in query]\n\n @classmethod\n def from_database(cls, obj):\n return super().from_database(obj, cls.JOIN, cls.on_fetch)\n\n\nAlternativeDomainFormSet = forms.formset_factory(\n AlternativeDomainForm,\n extra=1,\n absolute_max=1500, # django default; use `max_num` to limit entries\n formset=BaseAlternativeDomainFormSet,\n)\n\n\nclass DotGovDomainForm(RegistrarForm):\n def to_database(self, obj):\n if not self.is_valid():\n return\n domain = self.cleaned_data.get(\"requested_domain\", None)\n if domain:\n requested_domain = getattr(obj, \"requested_domain\", None)\n if requested_domain is not None:\n requested_domain.name = f\"{domain}.gov\"\n requested_domain.save()\n else:\n requested_domain = DraftDomain.objects.create(name=f\"{domain}.gov\")\n obj.requested_domain = requested_domain\n obj.save()\n\n obj.save()\n\n @classmethod\n def from_database(cls, obj):\n values = {}\n requested_domain = getattr(obj, \"requested_domain\", None)\n if requested_domain is not None:\n values[\"requested_domain\"] = Domain.sld(requested_domain.name)\n return values\n\n def clean_requested_domain(self):\n \"\"\"Validation code for domain names.\"\"\"\n try:\n requested = self.cleaned_data.get(\"requested_domain\", None)\n validated = DraftDomain.validate(requested)\n except errors.BlankValueError:\n raise forms.ValidationError(DOMAIN_API_MESSAGES[\"required\"], code=\"required\")\n except errors.ExtraDotsError:\n raise forms.ValidationError(DOMAIN_API_MESSAGES[\"extra_dots\"], code=\"extra_dots\")\n except errors.DomainUnavailableError:\n raise forms.ValidationError(DOMAIN_API_MESSAGES[\"unavailable\"], code=\"unavailable\")\n except errors.RegistrySystemError:\n raise forms.ValidationError(DOMAIN_API_MESSAGES[\"error\"], code=\"error\")\n except ValueError:\n raise forms.ValidationError(DOMAIN_API_MESSAGES[\"invalid\"], code=\"invalid\")\n return validated\n\n requested_domain = forms.CharField(label=\"What .gov domain do you want?\")\n\n\nclass PurposeForm(RegistrarForm):\n purpose = forms.CharField(\n label=\"Purpose\",\n widget=forms.Textarea(),\n validators=[\n MaxLengthValidator(\n 1000,\n message=\"Response must be less than 1000 characters.\",\n )\n ],\n error_messages={\"required\": \"Describe how you'll use the .gov domain you’re requesting.\"},\n )\n\n\nclass YourContactForm(RegistrarForm):\n def to_database(self, obj):\n if not self.is_valid():\n return\n contact = getattr(obj, \"submitter\", None)\n if contact is not None:\n super().to_database(contact)\n else:\n contact = Contact()\n super().to_database(contact)\n obj.submitter = contact\n obj.save()\n\n @classmethod\n def from_database(cls, obj):\n contact = getattr(obj, \"submitter\", None)\n return super().from_database(contact)\n\n first_name = forms.CharField(\n label=\"First name / given name\",\n error_messages={\"required\": \"Enter your first name / given name.\"},\n )\n middle_name = forms.CharField(\n required=False,\n label=\"Middle name (optional)\",\n )\n last_name = forms.CharField(\n label=\"Last name / family name\",\n error_messages={\"required\": \"Enter your last name / family name.\"},\n )\n title = forms.CharField(\n label=\"Title or role 
in your organization\",\n error_messages={\n \"required\": (\"Enter your title or role in your organization (e.g., Chief Information Officer).\")\n },\n )\n email = forms.EmailField(\n label=\"Email\",\n error_messages={\"invalid\": (\"Enter your email address in the required format, like [email protected].\")},\n )\n phone = PhoneNumberField(\n label=\"Phone\",\n error_messages={\"invalid\": \"Enter a valid 10-digit phone number.\", \"required\": \"Enter your phone number.\"},\n )\n\n\nclass OtherContactsForm(RegistrarForm):\n first_name = forms.CharField(\n label=\"First name / given name\",\n error_messages={\"required\": \"Enter the first name / given name of this contact.\"},\n )\n middle_name = forms.CharField(\n required=False,\n label=\"Middle name (optional)\",\n )\n last_name = forms.CharField(\n label=\"Last name / family name\",\n error_messages={\"required\": \"Enter the last name / family name of this contact.\"},\n )\n title = forms.CharField(\n label=\"Title or role in your organization\",\n error_messages={\n \"required\": (\n \"Enter the title or role in your organization of this contact (e.g., Chief Information Officer).\"\n )\n },\n )\n email = forms.EmailField(\n label=\"Email\",\n error_messages={\"invalid\": (\"Enter an email address in the required format, like [email protected].\")},\n )\n phone = PhoneNumberField(\n label=\"Phone\",\n error_messages={\n \"invalid\": \"Enter a valid 10-digit phone number.\",\n \"required\": \"Enter a phone number for this contact.\",\n },\n )\n\n def clean(self):\n \"\"\"\n This method overrides the default behavior for forms.\n This cleans the form after field validation has already taken place.\n In this override, allow for a form which is empty to be considered\n valid even though certain required fields have not passed field\n validation\n \"\"\"\n\n # Set form_is_empty to True initially\n form_is_empty = True\n for name, field in self.fields.items():\n # get the value of the field from the widget\n value = field.widget.value_from_datadict(self.data, self.files, self.add_prefix(name))\n # if any field in the submitted form is not empty, set form_is_empty to False\n if value is not None and value != \"\":\n form_is_empty = False\n\n if form_is_empty:\n # clear any errors raised by the form fields\n # (before this clean() method is run, each field\n # performs its own clean, which could result in\n # errors that we wish to ignore at this point)\n #\n # NOTE: we cannot just clear() the errors list.\n # That causes problems.\n for field in self.fields:\n if field in self.errors:\n del self.errors[field]\n\n return self.cleaned_data\n\n\nclass BaseOtherContactsFormSet(RegistrarFormSet):\n JOIN = \"other_contacts\"\n\n def should_delete(self, cleaned):\n empty = (isinstance(v, str) and (v.strip() == \"\" or v is None) for v in cleaned.values())\n return all(empty)\n\n def to_database(self, obj: DomainApplication):\n self._to_database(obj, self.JOIN, self.should_delete, self.pre_update, self.pre_create)\n\n @classmethod\n def from_database(cls, obj):\n return super().from_database(obj, cls.JOIN, cls.on_fetch)\n\n\nOtherContactsFormSet = forms.formset_factory(\n OtherContactsForm,\n extra=1,\n absolute_max=1500, # django default; use `max_num` to limit entries\n formset=BaseOtherContactsFormSet,\n)\n\n\nclass NoOtherContactsForm(RegistrarForm):\n no_other_contacts_rationale = forms.CharField(\n required=True,\n # label has to end in a space to get the label_suffix to show\n label=(\n \"Please explain why there are no other employees 
from your organization \"\n \"we can contact to help us assess your eligibility for a .gov domain.\"\n ),\n widget=forms.Textarea(),\n validators=[\n MaxLengthValidator(\n 1000,\n message=\"Response must be less than 1000 characters.\",\n )\n ],\n )\n\n\nclass AnythingElseForm(RegistrarForm):\n anything_else = forms.CharField(\n required=False,\n label=\"Anything else?\",\n widget=forms.Textarea(),\n validators=[\n MaxLengthValidator(\n 1000,\n message=\"Response must be less than 1000 characters.\",\n )\n ],\n )\n\n\nclass RequirementsForm(RegistrarForm):\n is_policy_acknowledged = forms.BooleanField(\n label=\"I read and agree to the requirements for operating .gov domains.\",\n error_messages={\n \"required\": (\"Check the box if you read and agree to the requirements for operating .gov domains.\")\n },\n )\n",
"path": "src/registrar/forms/application_wizard.py"
}
] | [
{
"content": "from __future__ import annotations # allows forward references in annotations\nfrom itertools import zip_longest\nimport logging\nfrom typing import Callable\nfrom phonenumber_field.formfields import PhoneNumberField # type: ignore\n\nfrom django import forms\nfrom django.core.validators import RegexValidator, MaxLengthValidator\nfrom django.utils.safestring import mark_safe\n\nfrom api.views import DOMAIN_API_MESSAGES\n\nfrom registrar.models import Contact, DomainApplication, DraftDomain, Domain\nfrom registrar.templatetags.url_helpers import public_site_url\nfrom registrar.utility import errors\n\nlogger = logging.getLogger(__name__)\n\n\nclass RegistrarForm(forms.Form):\n \"\"\"\n A common set of methods and configuration.\n\n The registrar's domain application is several pages of \"steps\".\n Each step is an HTML form containing one or more Django \"forms\".\n\n Subclass this class to create new forms.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n kwargs.setdefault(\"label_suffix\", \"\")\n # save a reference to an application object\n self.application = kwargs.pop(\"application\", None)\n super(RegistrarForm, self).__init__(*args, **kwargs)\n\n def to_database(self, obj: DomainApplication | Contact):\n \"\"\"\n Adds this form's cleaned data to `obj` and saves `obj`.\n\n Does nothing if form is not valid.\n \"\"\"\n if not self.is_valid():\n return\n for name, value in self.cleaned_data.items():\n setattr(obj, name, value)\n obj.save()\n\n @classmethod\n def from_database(cls, obj: DomainApplication | Contact | None):\n \"\"\"Returns a dict of form field values gotten from `obj`.\"\"\"\n if obj is None:\n return {}\n return {name: getattr(obj, name) for name in cls.declared_fields.keys()} # type: ignore\n\n\nclass RegistrarFormSet(forms.BaseFormSet):\n \"\"\"\n As with RegistrarForm, a common set of methods and configuration.\n\n Subclass this class to create new formsets.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n # save a reference to an application object\n self.application = kwargs.pop(\"application\", None)\n super(RegistrarFormSet, self).__init__(*args, **kwargs)\n # quick workaround to ensure that the HTML `required`\n # attribute shows up on required fields for any forms\n # in the formset which have data already (stated another\n # way: you can leave a form in the formset blank, but\n # if you opt to fill it out, you must fill it out _right_)\n for index in range(self.initial_form_count()):\n self.forms[index].use_required_attribute = True\n\n def should_delete(self, cleaned):\n \"\"\"Should this entry be deleted from the database?\"\"\"\n raise NotImplementedError\n\n def pre_update(self, db_obj, cleaned):\n \"\"\"Code to run before an item in the formset is saved.\"\"\"\n for key, value in cleaned.items():\n setattr(db_obj, key, value)\n\n def pre_create(self, db_obj, cleaned):\n \"\"\"Code to run before an item in the formset is created in the database.\"\"\"\n return cleaned\n\n def to_database(self, obj: DomainApplication):\n \"\"\"\n Adds this form's cleaned data to `obj` and saves `obj`.\n\n Does nothing if form is not valid.\n\n Hint: Subclass should call `self._to_database(...)`.\n \"\"\"\n raise NotImplementedError\n\n def _to_database(\n self,\n obj: DomainApplication,\n join: str,\n should_delete: Callable,\n pre_update: Callable,\n pre_create: Callable,\n ):\n \"\"\"\n Performs the actual work of saving.\n\n Has hooks such as `should_delete` and `pre_update` by which the\n subclass can control behavior. 
Add more hooks whenever needed.\n \"\"\"\n if not self.is_valid():\n return\n obj.save()\n\n query = getattr(obj, join).order_by(\"created_at\").all() # order matters\n\n # the use of `zip` pairs the forms in the formset with the\n # related objects gotten from the database -- there should always be\n # at least as many forms as database entries: extra forms means new\n # entries, but fewer forms is _not_ the correct way to delete items\n # (likely a client-side error or an attempt at data tampering)\n\n for db_obj, post_data in zip_longest(query, self.forms, fillvalue=None):\n cleaned = post_data.cleaned_data if post_data is not None else {}\n\n # matching database object exists, update it\n if db_obj is not None and cleaned:\n if should_delete(cleaned):\n db_obj.delete()\n continue\n else:\n pre_update(db_obj, cleaned)\n db_obj.save()\n\n # no matching database object, create it\n elif db_obj is None and cleaned:\n kwargs = pre_create(db_obj, cleaned)\n getattr(obj, join).create(**kwargs)\n\n @classmethod\n def on_fetch(cls, query):\n \"\"\"Code to run when fetching formset's objects from the database.\"\"\"\n return query.values()\n\n @classmethod\n def from_database(cls, obj: DomainApplication, join: str, on_fetch: Callable):\n \"\"\"Returns a dict of form field values gotten from `obj`.\"\"\"\n return on_fetch(getattr(obj, join).order_by(\"created_at\")) # order matters\n\n\nclass OrganizationTypeForm(RegistrarForm):\n organization_type = forms.ChoiceField(\n # use the long names in the application form\n choices=DomainApplication.OrganizationChoicesVerbose.choices,\n widget=forms.RadioSelect,\n error_messages={\"required\": \"Select the type of organization you represent.\"},\n )\n\n\nclass TribalGovernmentForm(RegistrarForm):\n federally_recognized_tribe = forms.BooleanField(\n label=\"Federally-recognized tribe \",\n required=False,\n )\n\n state_recognized_tribe = forms.BooleanField(\n label=\"State-recognized tribe \",\n required=False,\n )\n\n tribe_name = forms.CharField(\n label=\"Name of tribe\",\n error_messages={\"required\": \"Enter the tribe you represent.\"},\n )\n\n def clean(self):\n \"\"\"Needs to be either state or federally recognized.\"\"\"\n if not (self.cleaned_data[\"federally_recognized_tribe\"] or self.cleaned_data[\"state_recognized_tribe\"]):\n raise forms.ValidationError(\n # no sec because we are using it to include an internal URL\n # into a link. There should be no user-facing input in the\n # HTML indicated here.\n mark_safe( # nosec\n \"You can’t complete this application yet. \"\n \"Only tribes recognized by the U.S. federal government \"\n \"or by a U.S. state government are eligible for .gov \"\n 'domains. Use our <a href=\"{}\">contact form</a> to '\n \"tell us more about your tribe and why you want a .gov \"\n \"domain. 
We’ll review your information and get back \"\n \"to you.\".format(public_site_url(\"contact\"))\n ),\n code=\"invalid\",\n )\n\n\nclass OrganizationFederalForm(RegistrarForm):\n federal_type = forms.ChoiceField(\n choices=DomainApplication.BranchChoices.choices,\n widget=forms.RadioSelect,\n error_messages={\"required\": (\"Select the part of the federal government your organization is in.\")},\n )\n\n\nclass OrganizationElectionForm(RegistrarForm):\n is_election_board = forms.NullBooleanField(\n widget=forms.RadioSelect(\n choices=[\n (True, \"Yes\"),\n (False, \"No\"),\n ],\n )\n )\n\n def clean_is_election_board(self):\n \"\"\"This box must be checked to proceed but offer a clear error.\"\"\"\n # already converted to a boolean\n is_election_board = self.cleaned_data[\"is_election_board\"]\n if is_election_board is None:\n raise forms.ValidationError(\n (\"Select “Yes” if you represent an election office. Select “No” if you don’t.\"),\n code=\"required\",\n )\n return is_election_board\n\n\nclass OrganizationContactForm(RegistrarForm):\n # for federal agencies we also want to know the top-level agency.\n federal_agency = forms.ChoiceField(\n label=\"Federal agency\",\n # not required because this field won't be filled out unless\n # it is a federal agency. Use clean to check programatically\n # if it has been filled in when required.\n required=False,\n choices=[(\"\", \"--Select--\")] + DomainApplication.AGENCY_CHOICES,\n )\n organization_name = forms.CharField(\n label=\"Organization name\",\n error_messages={\"required\": \"Enter the name of your organization.\"},\n )\n address_line1 = forms.CharField(\n label=\"Street address\",\n error_messages={\"required\": \"Enter the street address of your organization.\"},\n )\n address_line2 = forms.CharField(\n required=False,\n label=\"Street address line 2 (optional)\",\n )\n city = forms.CharField(\n label=\"City\",\n error_messages={\"required\": \"Enter the city where your organization is located.\"},\n )\n state_territory = forms.ChoiceField(\n label=\"State, territory, or military post\",\n choices=[(\"\", \"--Select--\")] + DomainApplication.StateTerritoryChoices.choices,\n error_messages={\n \"required\": (\"Select the state, territory, or military post where your organization is located.\")\n },\n )\n zipcode = forms.CharField(\n label=\"Zip code\",\n validators=[\n RegexValidator(\n \"^[0-9]{5}(?:-[0-9]{4})?$|^$\",\n message=\"Enter a zip code in the form of 12345 or 12345-6789.\",\n )\n ],\n )\n urbanization = forms.CharField(\n required=False,\n label=\"Urbanization (required for Puerto Rico only)\",\n )\n\n def clean_federal_agency(self):\n \"\"\"Require something to be selected when this is a federal agency.\"\"\"\n federal_agency = self.cleaned_data.get(\"federal_agency\", None)\n # need the application object to know if this is federal\n if self.application is None:\n # hmm, no saved application object?, default require the agency\n if not federal_agency:\n # no answer was selected\n raise forms.ValidationError(\n \"Select the federal agency your organization is in.\",\n code=\"required\",\n )\n if self.application.is_federal():\n if not federal_agency:\n # no answer was selected\n raise forms.ValidationError(\n \"Select the federal agency your organization is in.\",\n code=\"required\",\n )\n return federal_agency\n\n\nclass AboutYourOrganizationForm(RegistrarForm):\n about_your_organization = forms.CharField(\n label=\"About your organization\",\n widget=forms.Textarea(),\n validators=[\n MaxLengthValidator(\n 1000,\n 
message=\"Response must be less than 1000 characters.\",\n )\n ],\n error_messages={\"required\": (\"Enter more information about your organization.\")},\n )\n\n\nclass AuthorizingOfficialForm(RegistrarForm):\n def to_database(self, obj):\n if not self.is_valid():\n return\n contact = getattr(obj, \"authorizing_official\", None)\n if contact is not None:\n super().to_database(contact)\n else:\n contact = Contact()\n super().to_database(contact)\n obj.authorizing_official = contact\n obj.save()\n\n @classmethod\n def from_database(cls, obj):\n contact = getattr(obj, \"authorizing_official\", None)\n return super().from_database(contact)\n\n first_name = forms.CharField(\n label=\"First name / given name\",\n error_messages={\"required\": (\"Enter the first name / given name of your authorizing official.\")},\n )\n last_name = forms.CharField(\n label=\"Last name / family name\",\n error_messages={\"required\": (\"Enter the last name / family name of your authorizing official.\")},\n )\n title = forms.CharField(\n label=\"Title or role in your organization\",\n error_messages={\n \"required\": (\n \"Enter the title or role your authorizing official has in your\"\n \" organization (e.g., Chief Information Officer).\"\n )\n },\n )\n email = forms.EmailField(\n label=\"Email\",\n error_messages={\"invalid\": (\"Enter an email address in the required format, like [email protected].\")},\n )\n\n\nclass CurrentSitesForm(RegistrarForm):\n website = forms.URLField(\n required=False,\n label=\"Public website\",\n error_messages={\n \"invalid\": (\"Enter your organization's current website in the required format, like example.com.\")\n },\n )\n\n\nclass BaseCurrentSitesFormSet(RegistrarFormSet):\n JOIN = \"current_websites\"\n\n def should_delete(self, cleaned):\n website = cleaned.get(\"website\", \"\")\n return website.strip() == \"\"\n\n def to_database(self, obj: DomainApplication):\n self._to_database(obj, self.JOIN, self.should_delete, self.pre_update, self.pre_create)\n\n @classmethod\n def from_database(cls, obj):\n return super().from_database(obj, cls.JOIN, cls.on_fetch)\n\n\nCurrentSitesFormSet = forms.formset_factory(\n CurrentSitesForm,\n extra=1,\n absolute_max=1500, # django default; use `max_num` to limit entries\n formset=BaseCurrentSitesFormSet,\n)\n\n\nclass AlternativeDomainForm(RegistrarForm):\n def clean_alternative_domain(self):\n \"\"\"Validation code for domain names.\"\"\"\n try:\n requested = self.cleaned_data.get(\"alternative_domain\", None)\n validated = DraftDomain.validate(requested, blank_ok=True)\n except errors.ExtraDotsError:\n raise forms.ValidationError(DOMAIN_API_MESSAGES[\"extra_dots\"], code=\"extra_dots\")\n except errors.DomainUnavailableError:\n raise forms.ValidationError(DOMAIN_API_MESSAGES[\"unavailable\"], code=\"unavailable\")\n except errors.RegistrySystemError:\n raise forms.ValidationError(DOMAIN_API_MESSAGES[\"error\"], code=\"error\")\n except ValueError:\n raise forms.ValidationError(DOMAIN_API_MESSAGES[\"invalid\"], code=\"invalid\")\n return validated\n\n alternative_domain = forms.CharField(\n required=False,\n label=\"\",\n )\n\n\nclass BaseAlternativeDomainFormSet(RegistrarFormSet):\n JOIN = \"alternative_domains\"\n\n def should_delete(self, cleaned):\n domain = cleaned.get(\"alternative_domain\", \"\")\n return domain.strip() == \"\"\n\n def pre_update(self, db_obj, cleaned):\n domain = cleaned.get(\"alternative_domain\", None)\n if domain is not None:\n db_obj.website = f\"{domain}.gov\"\n\n def pre_create(self, db_obj, cleaned):\n domain 
= cleaned.get(\"alternative_domain\", None)\n if domain is not None:\n return {\"website\": f\"{domain}.gov\"}\n else:\n return {}\n\n def to_database(self, obj: DomainApplication):\n self._to_database(obj, self.JOIN, self.should_delete, self.pre_update, self.pre_create)\n\n @classmethod\n def on_fetch(cls, query):\n return [{\"alternative_domain\": Domain.sld(domain.website)} for domain in query]\n\n @classmethod\n def from_database(cls, obj):\n return super().from_database(obj, cls.JOIN, cls.on_fetch)\n\n\nAlternativeDomainFormSet = forms.formset_factory(\n AlternativeDomainForm,\n extra=1,\n absolute_max=1500, # django default; use `max_num` to limit entries\n formset=BaseAlternativeDomainFormSet,\n)\n\n\nclass DotGovDomainForm(RegistrarForm):\n def to_database(self, obj):\n if not self.is_valid():\n return\n domain = self.cleaned_data.get(\"requested_domain\", None)\n if domain:\n requested_domain = getattr(obj, \"requested_domain\", None)\n if requested_domain is not None:\n requested_domain.name = f\"{domain}.gov\"\n requested_domain.save()\n else:\n requested_domain = DraftDomain.objects.create(name=f\"{domain}.gov\")\n obj.requested_domain = requested_domain\n obj.save()\n\n obj.save()\n\n @classmethod\n def from_database(cls, obj):\n values = {}\n requested_domain = getattr(obj, \"requested_domain\", None)\n if requested_domain is not None:\n values[\"requested_domain\"] = Domain.sld(requested_domain.name)\n return values\n\n def clean_requested_domain(self):\n \"\"\"Validation code for domain names.\"\"\"\n try:\n requested = self.cleaned_data.get(\"requested_domain\", None)\n validated = DraftDomain.validate(requested)\n except errors.BlankValueError:\n raise forms.ValidationError(DOMAIN_API_MESSAGES[\"required\"], code=\"required\")\n except errors.ExtraDotsError:\n raise forms.ValidationError(DOMAIN_API_MESSAGES[\"extra_dots\"], code=\"extra_dots\")\n except errors.DomainUnavailableError:\n raise forms.ValidationError(DOMAIN_API_MESSAGES[\"unavailable\"], code=\"unavailable\")\n except errors.RegistrySystemError:\n raise forms.ValidationError(DOMAIN_API_MESSAGES[\"error\"], code=\"error\")\n except ValueError:\n raise forms.ValidationError(DOMAIN_API_MESSAGES[\"invalid\"], code=\"invalid\")\n return validated\n\n requested_domain = forms.CharField(label=\"What .gov domain do you want?\")\n\n\nclass PurposeForm(RegistrarForm):\n purpose = forms.CharField(\n label=\"Purpose\",\n widget=forms.Textarea(),\n validators=[\n MaxLengthValidator(\n 1000,\n message=\"Response must be less than 1000 characters.\",\n )\n ],\n error_messages={\"required\": \"Describe how you'll use the .gov domain you’re requesting.\"},\n )\n\n\nclass YourContactForm(RegistrarForm):\n def to_database(self, obj):\n if not self.is_valid():\n return\n contact = getattr(obj, \"submitter\", None)\n if contact is not None:\n super().to_database(contact)\n else:\n contact = Contact()\n super().to_database(contact)\n obj.submitter = contact\n obj.save()\n\n @classmethod\n def from_database(cls, obj):\n contact = getattr(obj, \"submitter\", None)\n return super().from_database(contact)\n\n first_name = forms.CharField(\n label=\"First name / given name\",\n error_messages={\"required\": \"Enter your first name / given name.\"},\n )\n middle_name = forms.CharField(\n required=False,\n label=\"Middle name (optional)\",\n )\n last_name = forms.CharField(\n label=\"Last name / family name\",\n error_messages={\"required\": \"Enter your last name / family name.\"},\n )\n title = forms.CharField(\n label=\"Title or role 
in your organization\",\n error_messages={\n \"required\": (\"Enter your title or role in your organization (e.g., Chief Information Officer).\")\n },\n )\n email = forms.EmailField(\n label=\"Email\",\n error_messages={\"invalid\": (\"Enter your email address in the required format, like [email protected].\")},\n )\n phone = PhoneNumberField(\n label=\"Phone\",\n error_messages={\"invalid\": \"Enter a valid 10-digit phone number.\", \"required\": \"Enter your phone number.\"},\n )\n\n\nclass OtherContactsForm(RegistrarForm):\n first_name = forms.CharField(\n label=\"First name / given name\",\n error_messages={\"required\": \"Enter the first name / given name of this contact.\"},\n )\n middle_name = forms.CharField(\n required=False,\n label=\"Middle name (optional)\",\n )\n last_name = forms.CharField(\n label=\"Last name / family name\",\n error_messages={\"required\": \"Enter the last name / family name of this contact.\"},\n )\n title = forms.CharField(\n label=\"Title or role in your organization\",\n error_messages={\n \"required\": (\n \"Enter the title or role in your organization of this contact (e.g., Chief Information Officer).\"\n )\n },\n )\n email = forms.EmailField(\n label=\"Email\",\n error_messages={\"invalid\": (\"Enter an email address in the required format, like [email protected].\")},\n )\n phone = PhoneNumberField(\n label=\"Phone\",\n error_messages={\n \"invalid\": \"Enter a valid 10-digit phone number.\",\n \"required\": \"Enter a phone number for this contact.\",\n },\n )\n\n def clean(self):\n \"\"\"\n This method overrides the default behavior for forms.\n This cleans the form after field validation has already taken place.\n In this override, allow for a form which is empty to be considered\n valid even though certain required fields have not passed field\n validation\n \"\"\"\n\n # Set form_is_empty to True initially\n form_is_empty = True\n for name, field in self.fields.items():\n # get the value of the field from the widget\n value = field.widget.value_from_datadict(self.data, self.files, self.add_prefix(name))\n # if any field in the submitted form is not empty, set form_is_empty to False\n if value is not None and value != \"\":\n form_is_empty = False\n\n if form_is_empty:\n # clear any errors raised by the form fields\n # (before this clean() method is run, each field\n # performs its own clean, which could result in\n # errors that we wish to ignore at this point)\n #\n # NOTE: we cannot just clear() the errors list.\n # That causes problems.\n for field in self.fields:\n if field in self.errors:\n del self.errors[field]\n\n return self.cleaned_data\n\n\nclass BaseOtherContactsFormSet(RegistrarFormSet):\n JOIN = \"other_contacts\"\n\n def should_delete(self, cleaned):\n empty = (isinstance(v, str) and (v.strip() == \"\" or v is None) for v in cleaned.values())\n return all(empty)\n\n def to_database(self, obj: DomainApplication):\n self._to_database(obj, self.JOIN, self.should_delete, self.pre_update, self.pre_create)\n\n @classmethod\n def from_database(cls, obj):\n return super().from_database(obj, cls.JOIN, cls.on_fetch)\n\n\nOtherContactsFormSet = forms.formset_factory(\n OtherContactsForm,\n extra=1,\n absolute_max=1500, # django default; use `max_num` to limit entries\n formset=BaseOtherContactsFormSet,\n)\n\n\nclass NoOtherContactsForm(RegistrarForm):\n no_other_contacts_rationale = forms.CharField(\n required=True,\n # label has to end in a space to get the label_suffix to show\n label=(\n \"Please explain why there are no other employees 
from your organization \"\n \"we can contact to help us assess your eligibility for a .gov domain.\"\n ),\n widget=forms.Textarea(),\n validators=[\n MaxLengthValidator(\n 1000,\n message=\"Response must be less than 1000 characters.\",\n )\n ],\n )\n\n\nclass AnythingElseForm(RegistrarForm):\n anything_else = forms.CharField(\n required=False,\n label=\"Anything else?\",\n widget=forms.Textarea(),\n validators=[\n MaxLengthValidator(\n 1000,\n message=\"Response must be less than 1000 characters.\",\n )\n ],\n )\n\n\nclass RequirementsForm(RegistrarForm):\n is_policy_acknowledged = forms.BooleanField(\n label=\"I read and agree to the requirements for operating .gov domains.\",\n error_messages={\n \"required\": (\"Check the box if you read and agree to the requirements for operating .gov domains.\")\n },\n )\n",
"path": "src/registrar/forms/application_wizard.py"
}
] | diff --git a/src/registrar/forms/application_wizard.py b/src/registrar/forms/application_wizard.py
index 2802b1893..394007211 100644
--- a/src/registrar/forms/application_wizard.py
+++ b/src/registrar/forms/application_wizard.py
@@ -170,7 +170,7 @@ class TribalGovernmentForm(RegistrarForm):
)
tribe_name = forms.CharField(
- label="What is the name of the tribe you represent?",
+ label="Name of tribe",
error_messages={"required": "Enter the tribe you represent."},
)
diff --git a/src/registrar/templates/application_about_your_organization.html b/src/registrar/templates/application_about_your_organization.html
index 0d384b4f5..02e2e2c4f 100644
--- a/src/registrar/templates/application_about_your_organization.html
+++ b/src/registrar/templates/application_about_your_organization.html
@@ -2,14 +2,16 @@
{% load field_helpers %}
{% block form_instructions %}
- <p>We’d like to know more about your organization. Include the following in your response: </p>
+ <p>To help us determine your eligibility for a .gov domain, we need to know more about your organization. For example:</p>
<ul class="usa-list">
<li>The type of work your organization does </li>
- <li>How your organization is a government organization that is independent of a state government </li>
- <li>Include links to authorizing legislation, applicable bylaws or charter, or other documentation to support your claims.</li>
+ <li>How your organization operates independently from a state government</li>
+ <li>A description of the specialized, essential services you offer (if applicable)</li>
+ <li>Links to authorizing legislation, applicable bylaws or charter, or other documentation to support your claims</li>
</ul>
</p>
+<h2>What can you tell us about your organization?</h2>
{% endblock %}
{% block form_required_fields_help_text %}
@@ -20,4 +22,4 @@
{% with attr_maxlength=1000 add_label_class="usa-sr-only" %}
{% input_with_errors forms.0.about_your_organization %}
{% endwith %}
-{% endblock %}
\ No newline at end of file
+{% endblock %}
diff --git a/src/registrar/templates/application_authorizing_official.html b/src/registrar/templates/application_authorizing_official.html
index 3e33ab34e..068457373 100644
--- a/src/registrar/templates/application_authorizing_official.html
+++ b/src/registrar/templates/application_authorizing_official.html
@@ -14,7 +14,7 @@ <h2 class="margin-bottom-05">
{% include "includes/ao_example.html" %}
</div>
-<p>We typically don’t reach out to the authorizing official, but if contact is necessary, our practice is to coordinate first with you, the requestor. Read more about <a class="usa-link" rel="noopener noreferrer" target="_blank" href="{% public_site_url 'domains/eligibility/#you-must-have-approval-from-an-authorizing-official-within-your-organization' %}">who can serve as an authorizing official</a>.</p>
+<p>We typically don’t reach out to the authorizing official, but if contact is necessary, our practice is to coordinate with you, the requestor, first.</p>
{% endblock %}
diff --git a/src/registrar/templates/application_current_sites.html b/src/registrar/templates/application_current_sites.html
index 67343aee9..debadcfe2 100644
--- a/src/registrar/templates/application_current_sites.html
+++ b/src/registrar/templates/application_current_sites.html
@@ -2,9 +2,9 @@
{% load static field_helpers %}
{% block form_instructions %}
- <p>Enter your organization’s current public website, if you have one. For example,
- www.city.com. We can better evaluate your domain request if we know about domains
-you’re already using. If you already have any .gov domains please include them. This question is optional.</p>
+ <p>We can better evaluate your request if we know about domains you’re already using.</p>
+ <h2>What are the current websites for your organization?</h2>
+ <p>Enter your organization’s current public websites. If you already have a .gov domain, include that in your list. This question is optional.</p>
{% endblock %}
{% block form_required_fields_help_text %}
diff --git a/src/registrar/templates/application_org_contact.html b/src/registrar/templates/application_org_contact.html
index f5f773647..01b55d03d 100644
--- a/src/registrar/templates/application_org_contact.html
+++ b/src/registrar/templates/application_org_contact.html
@@ -2,15 +2,12 @@
{% load field_helpers %}
{% block form_instructions %}
- <h2 class="margin-bottom-05">
- What is the name and mailing address of your organization?
- </h2>
+ <p>If your domain request is approved, the name of your organization and your city/state will be listed in <a href="https://beta.get.gov/about/data/" target="_blank">.gov’s public data.</a></p>
- <p>Enter the name of the organization you represent. Your organization might be part
- of a larger entity. If so, enter information about your part of the larger entity.</p>
+ <h2>What is the name and mailing address of the organization you represent?</h2>
+
+ <p>Your organization might be part of a larger entity. If so, enter the name of your part of the larger entity. </p>
- <p>If your domain request is approved, the name of your organization will be publicly
- listed as the domain registrant.</p>
{% endblock %}
@@ -43,4 +40,4 @@ <h2 class="margin-bottom-05">
{% input_with_errors forms.0.urbanization %}
</fieldset>
-{% endblock %}
\ No newline at end of file
+{% endblock %}
diff --git a/src/registrar/templates/application_org_election.html b/src/registrar/templates/application_org_election.html
index 04c8f2657..b2ef462b5 100644
--- a/src/registrar/templates/application_org_election.html
+++ b/src/registrar/templates/application_org_election.html
@@ -2,9 +2,11 @@
{% load field_helpers %}
{% block form_instructions %}
- <h2 class="margin-bottom-05">Is your organization an election office?</h2>
+
- <p>An election office is a government entity whose <em>primary</em> responsibility is overseeing elections and/or conducting voter registration.</p>
+ <p>An election office is a government entity whose primary responsibility is overseeing elections and/or conducting voter registration. If your organization is an election office, we'll prioritize your request.</p>
+
+ <h2>Is your organization an election office?</h2>
<p>Answer “yes” only if the <em>main purpose</em> of your organization is to serve as an election office.</p>
diff --git a/src/registrar/templates/application_tribal_government.html b/src/registrar/templates/application_tribal_government.html
index bdca60907..3e79a4524 100644
--- a/src/registrar/templates/application_tribal_government.html
+++ b/src/registrar/templates/application_tribal_government.html
@@ -1,24 +1,24 @@
{% extends 'application_form.html' %}
{% load field_helpers %}
+{% block form_instructions %}
+ <p>To help us determine your eligibility for a .gov domain, we need to know more about your tribal government.</p>
+{% endblock %}
{% block form_fields %}
- {% with sublabel_text="Please include the entire name of your tribe as recognized by the Bureau of Indian Affairs." %}
- {% with link_text="Bureau of Indian Affairs" %}
- {% with link_href="https://www.federalregister.gov/documents/2023/01/12/2023-00504/indian-entities-recognized-by-and-eligible-to-receive-services-from-the-united-states-bureau-of" %}
- {% with external_link="true" target_blank="true" %}
- {% input_with_errors forms.0.tribe_name %}
- {% endwith %}
- {% endwith %}
- {% endwith %}
+ <h2>What is the name of the tribe you represent?</h2>
+ <p>Please include the full name of your tribe as recognized by the <a rel="noopener noreferrer" class="usa-link usa-link--external" href="https://www.federalregister.gov/documents/2024/01/08/2024-00109/indian-entities-recognized-by-and-eligible-to-receive-services-from-the-united-states-bureau-of" target="_blank">Bureau of Indian Affairs</a>.</p>
+
+ {% with external_link="true" target_blank="true" %}
+ {% input_with_errors forms.0.tribe_name %}
{% endwith %}
<fieldset class="usa-fieldset">
<legend class="usa-legend">
- <p>Is your organization a federally-recognized tribe or a state-recognized tribe? Check all that apply.
- <abbr class="usa-hint usa-hint--required" title="required">*</abbr></p>
+ <h2>Is your organization a federally-recognized tribe or a state-recognized tribe?</h2>
</legend>
+ <p>Check all that apply. <abbr class="usa-hint usa-hint--required" title="required">*</abbr></p>
{% input_with_errors forms.0.federally_recognized_tribe %}
{% input_with_errors forms.0.state_recognized_tribe %}
</fieldset>
|
deis__deis-427 | permalinks in the documentation
It would be nice to be able to permalink to a specific header in the Deis documentation, much like Stackato's documentation does: http://docs.stackato.com/client/index.html#getting-help
This is probably just a flag to set somewhere in the Sphinx configuration, but it would be awesome for reference purposes on IRC or by email.
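For reference, this looks like a single Sphinx HTML option; a minimal sketch against `docs/conf.py` (assuming Sphinx's default `headerlink` anchors are what we want, which the diff below also suggests):

```python
# docs/conf.py -- Sphinx HTML output options.
# When enabled, Sphinx appends an <a class="headerlink"> anchor (the "¶" sign)
# to every section heading, giving each header a stable URL fragment to share.
html_add_permalinks = True
```

The anchors can then be hidden until hover with a little CSS on `a.headerlink`, which keeps the headings visually clean, as the stylesheet added in the diff below does.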
| [
{
"content": "# -*- coding: utf-8 -*-\n#\n# deis documentation build configuration file, created by\n# sphinx-quickstart on Fri Jul 26 12:12:00 2013.\n#\n# This file is execfile()d with the current directory set to its containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\nimport os\nimport sys\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#sys.path.insert(0, os.path.abspath('.'))\nsys.path.insert(0, os.path.abspath('..'))\n# create local_settings.py for SECRET_KEY if necessary\nlocal_settings_path = os.path.abspath(\n os.path.join('..', 'deis', 'local_settings.py'))\nif not os.path.exists(local_settings_path):\n with open(local_settings_path, 'w') as local_settings:\n local_settings.write(\"SECRET_KEY = 'DummySecretKey'\\n\")\n# set up Django\nos.environ['DJANGO_SETTINGS_MODULE'] = 'deis.settings'\nfrom django.conf import settings # noqa\n\n# -- General configuration -----------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be extensions\n# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.\nextensions = ['sphinx.ext.autodoc', 'sphinx.ext.autosummary',\n 'sphinx.ext.viewcode', 'sphinxcontrib.httpdomain']\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix of source filenames.\nsource_suffix = '.rst'\n\n# The encoding of source files.\n#source_encoding = 'utf-8-sig'\n\n# The master toctree document.\nmaster_doc = 'toctree'\n\n# General information about the project.\nproject = u'deis'\ncopyright = u'2013, OpDemand LLC'\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\nfrom deis import __version__\n\n# The short X.Y version.\nversion = __version__.rsplit('.', 1)[0]\n# The full version, including alpha/beta/rc tags.\nrelease = __version__\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n#language = None\n\n# There are two options for replacing |today|: either, you set today to some\n# non-false value, then it is used:\n#today = ''\n# Else, today_fmt is used as the format for a strftime call.\n#today_fmt = '%B %d, %Y'\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\nexclude_patterns = ['_build']\n\n# The reST default role (used for this markup: `text`) to use for all documents.\n#default_role = None\n\n# If true, '()' will be appended to :func: etc. cross-reference text.\n#add_function_parentheses = True\n\n# If true, the current module name will be prepended to all description\n# unit titles (such as .. function::).\n#add_module_names = True\n\n# If true, sectionauthor and moduleauthor directives will be shown in the\n# output. 
They are ignored by default.\n#show_authors = False\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# A list of ignored prefixes for module index sorting.\n#modindex_common_prefix = []\n\n# If true, keep warnings as \"system message\" paragraphs in the built documents.\n#keep_warnings = False\n\n\n# -- Options for HTML output ---------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\nhtml_theme = 'deis'\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#html_theme_options = {}\n\n# Add any paths that contain custom themes here, relative to this directory.\nhtml_theme_path = ['theme']\n\n# The name for this set of Sphinx documents. If None, it defaults to\n# \"<project> v<release> documentation\".\n#html_title = None\n\n# A shorter title for the navigation bar. Default is the same as html_title.\n#html_short_title = None\n\n# The name of an image file (relative to this directory) to place at the top\n# of the sidebar.\n#html_logo = None\n\n# The name of an image file (within the static path) to use as favicon of the\n# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32\n# pixels large.\n#html_favicon = None\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['../web/static']\n\n# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,\n# using the given strftime format.\n#html_last_updated_fmt = '%b %d, %Y'\n\n# If true, SmartyPants will be used to convert quotes and dashes to\n# typographically correct entities.\nhtml_use_smartypants = True\n\nhtml_add_permalinks = None\n\n# Custom sidebar templates, maps document names to template names.\n#html_sidebars = {}\n\n# Additional templates that should be rendered to pages, maps page names to\n# template names.\n#html_additional_pages = {}\n\n# If false, no module index is generated.\n#html_domain_indices = True\n\n# If false, no index is generated.\n#html_use_index = True\n\n# If true, the index is split into individual pages for each letter.\n#html_split_index = False\n\n# If true, links to the reST sources are added to the pages.\n#html_show_sourcelink = True\n\n# If true, \"Created using Sphinx\" is shown in the HTML footer. Default is True.\n#html_show_sphinx = True\n\n# If true, \"(C) Copyright ...\" is shown in the HTML footer. Default is True.\n#html_show_copyright = True\n\n# If true, an OpenSearch description file will be output, and all pages will\n# contain a <link> tag referring to it. The value of this option must be the\n# base URL from which the finished HTML is served.\n#html_use_opensearch = ''\n\n# This is the file name suffix for HTML files (e.g. 
\".xhtml\").\n#html_file_suffix = None\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'deisdoc'\n\n\n# -- Options for LaTeX output --------------------------------------------------\n\nlatex_elements = {\n # The paper size ('letterpaper' or 'a4paper').\n #'papersize': 'letterpaper',\n\n # The font size ('10pt', '11pt' or '12pt').\n #'pointsize': '10pt',\n\n # Additional stuff for the LaTeX preamble.\n #'preamble': '',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title, author, documentclass [howto/manual]).\nlatex_documents = [\n ('index', 'deis.tex', u'deis Documentation',\n u'Author', 'manual'),\n]\n\n# The name of an image file (relative to this directory) to place at the top of\n# the title page.\n#latex_logo = None\n\n# For \"manual\" documents, if this is true, then toplevel headings are parts,\n# not chapters.\n#latex_use_parts = False\n\n# If true, show page references after internal links.\n#latex_show_pagerefs = False\n\n# If true, show URL addresses after external links.\n#latex_show_urls = False\n\n# Documents to append as an appendix to all manuals.\n#latex_appendices = []\n\n# If false, no module index is generated.\n#latex_domain_indices = True\n\n\n# -- Options for manual page output --------------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n ('index', 'deis', u'deis Documentation',\n [u'Author'], 1)\n]\n\n# If true, show URL addresses after external links.\n#man_show_urls = False\n\n\n# -- Options for Texinfo output ------------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n ('index', 'deis', u'deis Documentation',\n u'Author', 'deis', 'One line description of project.',\n 'Miscellaneous'),\n]\n\n# Documents to append as an appendix to all manuals.\n#texinfo_appendices = []\n\n# If false, no module index is generated.\n#texinfo_domain_indices = True\n\n# How to display URL addresses: 'footnote', 'no', or 'inline'.\n#texinfo_show_urls = 'footnote'\n\n# If true, do not generate a @detailmenu in the \"Top\" node's menu.\n#texinfo_no_detailmenu = False\n\n\n# -- Options for Epub output ---------------------------------------------------\n\n# Bibliographic Dublin Core info.\nepub_title = u'deis'\nepub_author = u'OpDemand LLC'\nepub_publisher = u'OpDemand LLC'\nepub_copyright = u'2013, OpDemand LLC'\n\n# The language of the text. It defaults to the language option\n# or en if the language is not set.\n#epub_language = ''\n\n# The scheme of the identifier. Typical schemes are ISBN or URL.\n#epub_scheme = ''\n\n# The unique identifier of the text. 
This can be a ISBN number\n# or the project homepage.\n#epub_identifier = ''\n\n# A unique identification for the text.\n#epub_uid = ''\n\n# A tuple containing the cover image and cover page html template filenames.\n#epub_cover = ()\n\n# A sequence of (type, uri, title) tuples for the guide element of content.opf.\n#epub_guide = ()\n\n# HTML files that should be inserted before the pages created by sphinx.\n# The format is a list of tuples containing the path and title.\n#epub_pre_files = []\n\n# HTML files shat should be inserted after the pages created by sphinx.\n# The format is a list of tuples containing the path and title.\n#epub_post_files = []\n\n# A list of files that should not be packed into the epub file.\n#epub_exclude_files = []\n\n# The depth of the table of contents in toc.ncx.\n#epub_tocdepth = 3\n\n# Allow duplicate toc entries.\n#epub_tocdup = True\n\n# Fix unsupported image types using the PIL.\n#epub_fix_images = False\n\n# Scale large images.\n#epub_max_image_width = 0\n\n# If 'no', URL addresses will not be shown.\n#epub_show_urls = 'inline'\n\n# If false, no index is generated.\n#epub_use_index = True\n",
"path": "docs/conf.py"
}
] | [
{
"content": "# -*- coding: utf-8 -*-\n#\n# deis documentation build configuration file, created by\n# sphinx-quickstart on Fri Jul 26 12:12:00 2013.\n#\n# This file is execfile()d with the current directory set to its containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\nimport os\nimport sys\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#sys.path.insert(0, os.path.abspath('.'))\nsys.path.insert(0, os.path.abspath('..'))\n# create local_settings.py for SECRET_KEY if necessary\nlocal_settings_path = os.path.abspath(\n os.path.join('..', 'deis', 'local_settings.py'))\nif not os.path.exists(local_settings_path):\n with open(local_settings_path, 'w') as local_settings:\n local_settings.write(\"SECRET_KEY = 'DummySecretKey'\\n\")\n# set up Django\nos.environ['DJANGO_SETTINGS_MODULE'] = 'deis.settings'\nfrom django.conf import settings # noqa\n\n# -- General configuration -----------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be extensions\n# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.\nextensions = ['sphinx.ext.autodoc', 'sphinx.ext.autosummary',\n 'sphinx.ext.viewcode', 'sphinxcontrib.httpdomain']\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix of source filenames.\nsource_suffix = '.rst'\n\n# The encoding of source files.\n#source_encoding = 'utf-8-sig'\n\n# The master toctree document.\nmaster_doc = 'toctree'\n\n# General information about the project.\nproject = u'deis'\ncopyright = u'2013, OpDemand LLC'\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\nfrom deis import __version__\n\n# The short X.Y version.\nversion = __version__.rsplit('.', 1)[0]\n# The full version, including alpha/beta/rc tags.\nrelease = __version__\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n#language = None\n\n# There are two options for replacing |today|: either, you set today to some\n# non-false value, then it is used:\n#today = ''\n# Else, today_fmt is used as the format for a strftime call.\n#today_fmt = '%B %d, %Y'\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\nexclude_patterns = ['_build']\n\n# The reST default role (used for this markup: `text`) to use for all documents.\n#default_role = None\n\n# If true, '()' will be appended to :func: etc. cross-reference text.\n#add_function_parentheses = True\n\n# If true, the current module name will be prepended to all description\n# unit titles (such as .. function::).\n#add_module_names = True\n\n# If true, sectionauthor and moduleauthor directives will be shown in the\n# output. 
They are ignored by default.\n#show_authors = False\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# A list of ignored prefixes for module index sorting.\n#modindex_common_prefix = []\n\n# If true, keep warnings as \"system message\" paragraphs in the built documents.\n#keep_warnings = False\n\n\n# -- Options for HTML output ---------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\nhtml_theme = 'deis'\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#html_theme_options = {}\n\n# Add any paths that contain custom themes here, relative to this directory.\nhtml_theme_path = ['theme']\n\n# The name for this set of Sphinx documents. If None, it defaults to\n# \"<project> v<release> documentation\".\n#html_title = None\n\n# A shorter title for the navigation bar. Default is the same as html_title.\n#html_short_title = None\n\n# The name of an image file (relative to this directory) to place at the top\n# of the sidebar.\n#html_logo = None\n\n# The name of an image file (within the static path) to use as favicon of the\n# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32\n# pixels large.\n#html_favicon = None\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['../web/static']\n\n# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,\n# using the given strftime format.\n#html_last_updated_fmt = '%b %d, %Y'\n\n# If true, SmartyPants will be used to convert quotes and dashes to\n# typographically correct entities.\nhtml_use_smartypants = True\n\nhtml_add_permalinks = True\n\n# Custom sidebar templates, maps document names to template names.\n#html_sidebars = {}\n\n# Additional templates that should be rendered to pages, maps page names to\n# template names.\n#html_additional_pages = {}\n\n# If false, no module index is generated.\n#html_domain_indices = True\n\n# If false, no index is generated.\n#html_use_index = True\n\n# If true, the index is split into individual pages for each letter.\n#html_split_index = False\n\n# If true, links to the reST sources are added to the pages.\n#html_show_sourcelink = True\n\n# If true, \"Created using Sphinx\" is shown in the HTML footer. Default is True.\n#html_show_sphinx = True\n\n# If true, \"(C) Copyright ...\" is shown in the HTML footer. Default is True.\n#html_show_copyright = True\n\n# If true, an OpenSearch description file will be output, and all pages will\n# contain a <link> tag referring to it. The value of this option must be the\n# base URL from which the finished HTML is served.\n#html_use_opensearch = ''\n\n# This is the file name suffix for HTML files (e.g. 
\".xhtml\").\n#html_file_suffix = None\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'deisdoc'\n\n\n# -- Options for LaTeX output --------------------------------------------------\n\nlatex_elements = {\n # The paper size ('letterpaper' or 'a4paper').\n #'papersize': 'letterpaper',\n\n # The font size ('10pt', '11pt' or '12pt').\n #'pointsize': '10pt',\n\n # Additional stuff for the LaTeX preamble.\n #'preamble': '',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title, author, documentclass [howto/manual]).\nlatex_documents = [\n ('index', 'deis.tex', u'deis Documentation',\n u'Author', 'manual'),\n]\n\n# The name of an image file (relative to this directory) to place at the top of\n# the title page.\n#latex_logo = None\n\n# For \"manual\" documents, if this is true, then toplevel headings are parts,\n# not chapters.\n#latex_use_parts = False\n\n# If true, show page references after internal links.\n#latex_show_pagerefs = False\n\n# If true, show URL addresses after external links.\n#latex_show_urls = False\n\n# Documents to append as an appendix to all manuals.\n#latex_appendices = []\n\n# If false, no module index is generated.\n#latex_domain_indices = True\n\n\n# -- Options for manual page output --------------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n ('index', 'deis', u'deis Documentation',\n [u'Author'], 1)\n]\n\n# If true, show URL addresses after external links.\n#man_show_urls = False\n\n\n# -- Options for Texinfo output ------------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n ('index', 'deis', u'deis Documentation',\n u'Author', 'deis', 'One line description of project.',\n 'Miscellaneous'),\n]\n\n# Documents to append as an appendix to all manuals.\n#texinfo_appendices = []\n\n# If false, no module index is generated.\n#texinfo_domain_indices = True\n\n# How to display URL addresses: 'footnote', 'no', or 'inline'.\n#texinfo_show_urls = 'footnote'\n\n# If true, do not generate a @detailmenu in the \"Top\" node's menu.\n#texinfo_no_detailmenu = False\n\n\n# -- Options for Epub output ---------------------------------------------------\n\n# Bibliographic Dublin Core info.\nepub_title = u'deis'\nepub_author = u'OpDemand LLC'\nepub_publisher = u'OpDemand LLC'\nepub_copyright = u'2013, OpDemand LLC'\n\n# The language of the text. It defaults to the language option\n# or en if the language is not set.\n#epub_language = ''\n\n# The scheme of the identifier. Typical schemes are ISBN or URL.\n#epub_scheme = ''\n\n# The unique identifier of the text. 
This can be a ISBN number\n# or the project homepage.\n#epub_identifier = ''\n\n# A unique identification for the text.\n#epub_uid = ''\n\n# A tuple containing the cover image and cover page html template filenames.\n#epub_cover = ()\n\n# A sequence of (type, uri, title) tuples for the guide element of content.opf.\n#epub_guide = ()\n\n# HTML files that should be inserted before the pages created by sphinx.\n# The format is a list of tuples containing the path and title.\n#epub_pre_files = []\n\n# HTML files shat should be inserted after the pages created by sphinx.\n# The format is a list of tuples containing the path and title.\n#epub_post_files = []\n\n# A list of files that should not be packed into the epub file.\n#epub_exclude_files = []\n\n# The depth of the table of contents in toc.ncx.\n#epub_tocdepth = 3\n\n# Allow duplicate toc entries.\n#epub_tocdup = True\n\n# Fix unsupported image types using the PIL.\n#epub_fix_images = False\n\n# Scale large images.\n#epub_max_image_width = 0\n\n# If 'no', URL addresses will not be shown.\n#epub_show_urls = 'inline'\n\n# If false, no index is generated.\n#epub_use_index = True\n",
"path": "docs/conf.py"
}
] | diff --git a/docs/conf.py b/docs/conf.py
index 89d654b8d7..ced6a1ee8e 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -147,7 +147,7 @@
# typographically correct entities.
html_use_smartypants = True
-html_add_permalinks = None
+html_add_permalinks = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
diff --git a/docs/server/api.tasks.rst b/docs/server/api.tasks.rst
index f73600c24c..64593ea4f5 100644
--- a/docs/server/api.tasks.rst
+++ b/docs/server/api.tasks.rst
@@ -19,9 +19,4 @@ api.tasks
.. autofunction:: destroy_node(node)
.. autofunction:: converge_node(node)
.. autofunction:: run_node(node, command)
- .. autofunction:: build_formation(formation)
- .. autofunction:: destroy_formation(formation)
- .. autofunction:: converge_formation(formation)
- .. autofunction:: build_app(app)
- .. autofunction:: destroy_app(app)
.. autofunction:: converge_controller()
diff --git a/docs/theme/deis/layout.html b/docs/theme/deis/layout.html
index 4bb1ddd0ec..475cff977e 100644
--- a/docs/theme/deis/layout.html
+++ b/docs/theme/deis/layout.html
@@ -28,6 +28,7 @@
<link rel="stylesheet" href="{{ pathto('_static/css/bootstrap.min.css', 1) }}" type="text/css">
<link rel="stylesheet" href="{{ pathto('_static/css/bootstrap-responsive.min.css', 1) }}" type="text/css">
<link rel="stylesheet" href="{{ pathto('_static/css/main.css', 1) }}" type="text/css">
+ <link rel="stylesheet" href="{{ pathto('_static/css/deis-docs.css', 1) }}" type="text/css">
<link rel="stylesheet" href="{{ pathto('_static/pygments.css', 1) }}" type="text/css">
<link rel="shortcut icon" href="{{ pathto('_static/favicon.ico', 1) }}">
<script
diff --git a/web/static/css/deis-docs.css b/web/static/css/deis-docs.css
new file mode 100644
index 0000000000..4e221a4124
--- /dev/null
+++ b/web/static/css/deis-docs.css
@@ -0,0 +1,16 @@
+
+a.headerlink {
+ visibility: hidden;
+ margin-left: 8px;
+ vertical-align: middle;
+}
+
+h1:hover > a.headerlink,
+h2:hover > a.headerlink,
+h3:hover > a.headerlink,
+h4:hover > a.headerlink,
+h5:hover > a.headerlink,
+h6:hover > a.headerlink,
+dt:hover > a.headerlink {
+ visibility: visible;
+}
|
strawberry-graphql__strawberry-945 | Can't get DataLoader to work
Hello! I'm trying the examples from this page: https://strawberry.rocks/docs/guides/dataloaders.
Running the following code on Python 3.8:
```python
import strawberry
from strawberry.dataloader import DataLoader
from typing import List

@strawberry.type
class User:
    id: strawberry.ID

async def load_users(keys) -> List[User]:
    return [User(id=key) for key in keys]

loader = DataLoader(load_fn=load_users)

@strawberry.type
class Query:
    @strawberry.field
    async def get_user(self, id: strawberry.ID) -> User:
        return await loader.load(id)

schema = strawberry.Schema(query=Query)
```
I get the following error message:
```
Task <Task pending name='Task-8' coro=<ExecutionContext.resolve_field.<locals>.await_result()
running at /Users/-/Documents/src/dataservice-poc/virtualenv/lib/python3.8/site-packages/graphql/execution/execute.py:625>
cb=[gather.<locals>._done_callback() at /usr/local/Cellar/[email protected]/3.8.5/Frameworks/Python.framework/Versions/3.8/lib/python3.8/asyncio/tasks.py:758]>
got Future <Future pending> attached to a different loop
```
When I try my own code (which is pretty much the same, except the loader is real and reads data from the db), I get this: `RuntimeError: await wasn't used with future`.
I'm stuck and don't really know where to look. I thought Strawberry was supposed to manage async processing, but it looks like it doesn't work that way. Any help would be greatly appreciated.
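A note on what the traceback suggests (this is a reading of the error, not an official diagnosis): "got Future attached to a different loop" usually means the `DataLoader` created its internal futures on one event loop while the query runs on another, e.g. when the loader is built at import time and the server then starts its own loop. A minimal sketch of a workaround under that assumption is to construct the loader inside the running loop, for example per request:

```python
import strawberry
from strawberry.dataloader import DataLoader
from typing import List


@strawberry.type
class User:
    id: strawberry.ID


async def load_users(keys) -> List[User]:
    return [User(id=key) for key in keys]


@strawberry.type
class Query:
    @strawberry.field
    async def get_user(self, id: strawberry.ID) -> User:
        # Built inside the active event loop, so its futures belong to the
        # same loop that resolves the field. In a real app you would cache
        # one loader per request (for example on the execution context) to
        # keep the batching benefit instead of rebuilding it per field.
        loader = DataLoader(load_fn=load_users)
        return await loader.load(id)


schema = strawberry.Schema(query=Query)
```

The project's eventual fix (see the diff below) instead passes `loop="none"` to `uvicorn.run`, telling uvicorn not to set up its own event loop in the debug server.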
| [
{
"content": "import importlib\nimport sys\n\nimport click\nimport hupper\nimport uvicorn\nfrom starlette.applications import Starlette\nfrom starlette.middleware.cors import CORSMiddleware\n\nfrom strawberry import Schema\nfrom strawberry.asgi import GraphQL\nfrom strawberry.utils.importer import import_module_symbol\n\n\[email protected](\"server\", short_help=\"Starts debug server\")\[email protected](\"schema\", type=str)\[email protected](\"-h\", \"--host\", default=\"0.0.0.0\", type=str)\[email protected](\"-p\", \"--port\", default=8000, type=int)\[email protected](\n \"--app-dir\",\n default=\".\",\n type=str,\n show_default=True,\n help=(\n \"Look for the module in the specified directory, by adding this to the \"\n \"PYTHONPATH. Defaults to the current working directory. \"\n \"Works the same as `--app-dir` in uvicorn.\"\n ),\n)\ndef server(schema, host, port, app_dir):\n sys.path.insert(0, app_dir)\n\n try:\n schema_symbol = import_module_symbol(schema, default_symbol_name=\"schema\")\n except (ImportError, AttributeError) as exc:\n message = str(exc)\n raise click.BadArgumentUsage(message)\n\n if not isinstance(schema_symbol, Schema):\n message = \"The `schema` must be an instance of strawberry.Schema\"\n raise click.BadArgumentUsage(message)\n\n reloader = hupper.start_reloader(\"strawberry.cli.run\", verbose=False)\n schema_module = importlib.import_module(schema_symbol.__module__)\n reloader.watch_files([schema_module.__file__])\n\n app = Starlette(debug=True)\n app.add_middleware(\n CORSMiddleware, allow_headers=[\"*\"], allow_origins=[\"*\"], allow_methods=[\"*\"]\n )\n\n graphql_app = GraphQL(schema_symbol, debug=True)\n\n paths = [\"/\", \"/graphql\"]\n for path in paths:\n app.add_route(path, graphql_app)\n app.add_websocket_route(path, graphql_app)\n\n print(f\"Running strawberry on http://{host}:{port}/ 🍓\")\n uvicorn.run(app, host=host, port=port, log_level=\"error\")\n",
"path": "strawberry/cli/commands/server.py"
}
] | [
{
"content": "import importlib\nimport sys\n\nimport click\nimport hupper\nimport uvicorn\nfrom starlette.applications import Starlette\nfrom starlette.middleware.cors import CORSMiddleware\n\nfrom strawberry import Schema\nfrom strawberry.asgi import GraphQL\nfrom strawberry.utils.importer import import_module_symbol\n\n\[email protected](\"server\", short_help=\"Starts debug server\")\[email protected](\"schema\", type=str)\[email protected](\"-h\", \"--host\", default=\"0.0.0.0\", type=str)\[email protected](\"-p\", \"--port\", default=8000, type=int)\[email protected](\n \"--app-dir\",\n default=\".\",\n type=str,\n show_default=True,\n help=(\n \"Look for the module in the specified directory, by adding this to the \"\n \"PYTHONPATH. Defaults to the current working directory. \"\n \"Works the same as `--app-dir` in uvicorn.\"\n ),\n)\ndef server(schema, host, port, app_dir):\n sys.path.insert(0, app_dir)\n\n try:\n schema_symbol = import_module_symbol(schema, default_symbol_name=\"schema\")\n except (ImportError, AttributeError) as exc:\n message = str(exc)\n raise click.BadArgumentUsage(message)\n\n if not isinstance(schema_symbol, Schema):\n message = \"The `schema` must be an instance of strawberry.Schema\"\n raise click.BadArgumentUsage(message)\n\n reloader = hupper.start_reloader(\"strawberry.cli.run\", verbose=False)\n schema_module = importlib.import_module(schema_symbol.__module__)\n reloader.watch_files([schema_module.__file__])\n\n app = Starlette(debug=True)\n app.add_middleware(\n CORSMiddleware, allow_headers=[\"*\"], allow_origins=[\"*\"], allow_methods=[\"*\"]\n )\n\n graphql_app = GraphQL(schema_symbol, debug=True)\n\n paths = [\"/\", \"/graphql\"]\n for path in paths:\n app.add_route(path, graphql_app)\n app.add_websocket_route(path, graphql_app)\n\n print(f\"Running strawberry on http://{host}:{port}/ 🍓\")\n uvicorn.run(app, loop=\"none\", host=host, port=port, log_level=\"error\")\n",
"path": "strawberry/cli/commands/server.py"
}
] | diff --git a/RELEASE.md b/RELEASE.md
new file mode 100644
index 0000000000..920b50d7a0
--- /dev/null
+++ b/RELEASE.md
@@ -0,0 +1,4 @@
+Release type: patch
+
+This releases fixes an issue with the debug server that prevented the
+usage of dataloaders, see: https://github.com/strawberry-graphql/strawberry/issues/940
diff --git a/strawberry/cli/commands/server.py b/strawberry/cli/commands/server.py
index faf25ede65..d751595725 100644
--- a/strawberry/cli/commands/server.py
+++ b/strawberry/cli/commands/server.py
@@ -57,4 +57,4 @@ def server(schema, host, port, app_dir):
app.add_websocket_route(path, graphql_app)
print(f"Running strawberry on http://{host}:{port}/ 🍓")
- uvicorn.run(app, host=host, port=port, log_level="error")
+ uvicorn.run(app, loop="none", host=host, port=port, log_level="error")
|
scverse__scanpy-1255 | Scanpy spatial bug in visualisation defaults
Looks like the default size used for plotting Visium spots is far too large.
Try this in your lymph node notebook:
```python
sc.pl.spatial(adata, img_key="hires", cmap='magma',
              color=['total_counts', 'n_genes_by_counts'],
              gene_symbols='SYMBOL')
```
This is what I get:
[screenshot: spatial plots rendered with hugely oversized spots covering the tissue image]
#### Versions:
scanpy==1.5.1 anndata==0.7.1 umap==0.3.10 numpy==1.17.3 scipy==1.4.1 pandas==0.25.3 scikit-learn==0.22.1 statsmodels==0.10.2 python-igraph==0.7.1 louvain==0.6.1 leidenalg==0.7.0
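A workaround sketch, assuming `size` acts as a multiplier on the spot diameter (the `size_spot = 70 * size` scaling in the plotting code below suggests it does), is to pass an explicit small `size`:

```python
import scanpy as sc

# Public 10x lymph node Visium dataset (sc.datasets.visium_sge is assumed to
# be available in this scanpy version); substitute your own AnnData otherwise.
adata = sc.datasets.visium_sge(sample_id="V1_Human_Lymph_Node")
sc.pp.calculate_qc_metrics(adata, inplace=True)

sc.pl.spatial(
    adata,
    img_key="hires",
    cmap="magma",
    color=["total_counts", "n_genes_by_counts"],
    size=1.0,  # explicit spot scale; smaller values shrink the spots further
)
```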
| [
{
"content": "import collections.abc as cabc\nfrom typing import Union, Optional, Sequence, Any, Mapping, List, Tuple, Callable\n\nimport numpy as np\nfrom anndata import AnnData\nfrom cycler import Cycler\nfrom matplotlib.axes import Axes\nfrom matplotlib.figure import Figure\nfrom pandas.api.types import is_categorical_dtype\nfrom matplotlib import pyplot as pl, colors\nfrom matplotlib import rcParams\nfrom matplotlib import patheffects\nfrom matplotlib.colors import Colormap\nfrom functools import partial\n\nfrom .. import _utils\nfrom .._utils import (\n _IGraphLayout,\n _FontWeight,\n _FontSize,\n circles,\n make_projection_available,\n)\nfrom .._docs import (\n doc_adata_color_etc,\n doc_edges_arrows,\n doc_scatter_embedding,\n doc_show_save_ax,\n)\nfrom ... import logging as logg\nfrom ..._settings import settings\nfrom ..._utils import sanitize_anndata, _doc_params, Empty, _empty\nfrom ..._compat import Literal\n\nVMinMax = Union[str, float, Callable[[Sequence[float]], float]]\n\n\n@_doc_params(\n adata_color_etc=doc_adata_color_etc,\n edges_arrows=doc_edges_arrows,\n scatter_bulk=doc_scatter_embedding,\n show_save_ax=doc_show_save_ax,\n)\ndef embedding(\n adata: AnnData,\n basis: str,\n *,\n color: Union[str, Sequence[str], None] = None,\n gene_symbols: Optional[str] = None,\n use_raw: Optional[bool] = None,\n sort_order: bool = True,\n edges: bool = False,\n edges_width: float = 0.1,\n edges_color: Union[str, Sequence[float], Sequence[str]] = 'grey',\n neighbors_key: Optional[str] = None,\n arrows: bool = False,\n arrows_kwds: Optional[Mapping[str, Any]] = None,\n groups: Optional[str] = None,\n components: Union[str, Sequence[str]] = None,\n layer: Optional[str] = None,\n projection: Literal['2d', '3d'] = '2d',\n # image parameters\n img_key: Optional[str] = None,\n crop_coord: Tuple[int, int, int, int] = None,\n alpha_img: float = 1.0,\n bw: bool = False,\n library_id: str = None,\n #\n color_map: Union[Colormap, str, None] = None,\n palette: Union[str, Sequence[str], Cycler, None] = None,\n size: Union[float, Sequence[float], None] = None,\n frameon: Optional[bool] = None,\n legend_fontsize: Union[int, float, _FontSize, None] = None,\n legend_fontweight: Union[int, _FontWeight] = 'bold',\n legend_loc: str = 'right margin',\n legend_fontoutline: Optional[int] = None,\n vmax: Union[VMinMax, Sequence[VMinMax], None] = None,\n vmin: Union[VMinMax, Sequence[VMinMax], None] = None,\n add_outline: Optional[bool] = False,\n outline_width: Tuple[float, float] = (0.3, 0.05),\n outline_color: Tuple[str, str] = ('black', 'white'),\n ncols: int = 4,\n hspace: float = 0.25,\n wspace: Optional[float] = None,\n title: Union[str, Sequence[str], None] = None,\n show: Optional[bool] = None,\n save: Union[bool, str, None] = None,\n ax: Optional[Axes] = None,\n return_fig: Optional[bool] = None,\n **kwargs,\n) -> Union[Figure, Axes, None]:\n \"\"\"\\\n Scatter plot for user specified embedding basis (e.g. umap, pca, etc)\n\n Parameters\n ----------\n basis\n Name of the `obsm` basis to use.\n {adata_color_etc}\n {edges_arrows}\n {scatter_bulk}\n {show_save_ax}\n\n Returns\n -------\n If `show==False` a :class:`~matplotlib.axes.Axes` or a list of it.\n \"\"\"\n\n sanitize_anndata(adata)\n if color_map is not None:\n kwargs['cmap'] = color_map\n if size is not None:\n kwargs['s'] = size\n if 'edgecolor' not in kwargs:\n # by default turn off edge color. 
Otherwise, for\n # very small sizes the edge will not reduce its size\n # (https://github.com/theislab/scanpy/issues/293)\n kwargs['edgecolor'] = 'none'\n\n if groups:\n if isinstance(groups, str):\n groups = [groups]\n\n make_projection_available(projection)\n args_3d = dict(projection='3d') if projection == '3d' else {}\n\n # Deal with Raw\n if use_raw is None:\n # check if adata.raw is set\n use_raw = layer is None and adata.raw is not None\n if use_raw and layer is not None:\n raise ValueError(\n \"Cannot use both a layer and the raw representation. Was passed:\"\n f\"use_raw={use_raw}, layer={layer}.\"\n )\n\n if wspace is None:\n # try to set a wspace that is not too large or too small given the\n # current figure size\n wspace = 0.75 / rcParams['figure.figsize'][0] + 0.02\n if adata.raw is None and use_raw:\n raise ValueError(\n \"`use_raw` is set to True but AnnData object does not have raw. \"\n \"Please check.\"\n )\n # turn color into a python list\n color = [color] if isinstance(color, str) or color is None else list(color)\n if title is not None:\n # turn title into a python list if not None\n title = [title] if isinstance(title, str) else list(title)\n\n # get the points position and the components list\n # (only if components is not None)\n data_points, components_list = _get_data_points(\n adata, basis, projection, components, img_key, library_id\n )\n\n # Setup layout.\n # Most of the code is for the case when multiple plots are required\n # 'color' is a list of names that want to be plotted.\n # Eg. ['Gene1', 'louvain', 'Gene2'].\n # component_list is a list of components [[0,1], [1,2]]\n if (\n not isinstance(color, str)\n and isinstance(color, cabc.Sequence)\n and len(color) > 1\n ) or len(components_list) > 1:\n if ax is not None:\n raise ValueError(\n \"Cannot specify `ax` when plotting multiple panels \"\n \"(each for a given value of 'color').\"\n )\n if len(components_list) == 0:\n components_list = [None]\n\n # each plot needs to be its own panel\n num_panels = len(color) * len(components_list)\n fig, grid = _panel_grid(hspace, wspace, ncols, num_panels)\n else:\n if len(components_list) == 0:\n components_list = [None]\n grid = None\n if ax is None:\n fig = pl.figure()\n ax = fig.add_subplot(111, **args_3d)\n\n # turn vmax and vmin into a sequence\n if isinstance(vmax, str) or not isinstance(vmax, cabc.Sequence):\n vmax = [vmax]\n if isinstance(vmin, str) or not isinstance(vmin, cabc.Sequence):\n vmin = [vmin]\n\n if 's' in kwargs:\n size = kwargs.pop('s')\n\n if size is not None:\n # check if size is any type of sequence, and if so\n # set as ndarray\n import pandas.core.series\n\n if (\n size is not None\n and isinstance(\n size, (cabc.Sequence, pandas.core.series.Series, np.ndarray,)\n )\n and len(size) == adata.shape[0]\n ):\n size = np.array(size, dtype=float)\n else:\n size = 120000 / adata.shape[0]\n\n ###\n # make the plots\n axs = []\n import itertools\n\n idx_components = range(len(components_list))\n\n # use itertools.product to make a plot for each color and for each component\n # For example if color=[gene1, gene2] and components=['1,2, '2,3'].\n # The plots are: [\n # color=gene1, components=[1,2], color=gene1, components=[2,3],\n # color=gene2, components = [1, 2], color=gene2, components=[2,3],\n # ]\n for count, (value_to_plot, component_idx) in enumerate(\n itertools.product(color, idx_components)\n ):\n color_vector, categorical = _get_color_values(\n adata,\n value_to_plot,\n layer=layer,\n groups=groups,\n palette=palette,\n 
use_raw=use_raw,\n gene_symbols=gene_symbols,\n )\n\n # check if higher value points should be plot on top\n if sort_order is True and value_to_plot is not None and categorical is False:\n order = np.argsort(color_vector)\n color_vector = color_vector[order]\n _data_points = data_points[component_idx][order, :]\n\n # check if 'size' is given (stored in kwargs['s']\n # and reorder it.\n if isinstance(size, np.ndarray):\n size = np.array(size)[order]\n else:\n _data_points = data_points[component_idx]\n\n # if plotting multiple panels, get the ax from the grid spec\n # else use the ax value (either user given or created previously)\n if grid:\n ax = pl.subplot(grid[count], **args_3d)\n axs.append(ax)\n if not (settings._frameon if frameon is None else frameon):\n ax.axis('off')\n if title is None:\n if value_to_plot is not None:\n ax.set_title(value_to_plot)\n else:\n ax.set_title('')\n else:\n try:\n ax.set_title(title[count])\n except IndexError:\n logg.warning(\n \"The title list is shorter than the number of panels. \"\n \"Using 'color' value instead for some plots.\"\n )\n ax.set_title(value_to_plot)\n\n # check vmin and vmax options\n if categorical:\n kwargs['vmin'] = kwargs['vmax'] = None\n else:\n kwargs['vmin'], kwargs['vmax'] = _get_vmin_vmax(\n vmin, vmax, count, color_vector\n )\n\n # make the scatter plot\n if projection == '3d':\n cax = ax.scatter(\n _data_points[:, 0],\n _data_points[:, 1],\n _data_points[:, 2],\n marker=\".\",\n c=color_vector,\n rasterized=settings._vector_friendly,\n **kwargs,\n )\n else:\n if img_key is not None:\n # had to return size_spot cause spot size is set according\n # to the image to be plotted\n img_processed, img_coord, size_spot, cmap_img = _process_image(\n adata, data_points, img_key, crop_coord, size, library_id, bw\n )\n ax.imshow(img_processed, cmap=cmap_img, alpha=alpha_img)\n ax.set_xlim(img_coord[0], img_coord[1])\n ax.set_ylim(img_coord[3], img_coord[2])\n elif img_key is None and library_id is not None:\n # order of magnitude similar to public visium\n size_spot = 70 * size\n\n scatter = (\n partial(ax.scatter, s=size)\n if library_id is None\n else partial(circles, s=size_spot, ax=ax)\n )\n\n if add_outline:\n # the default outline is a black edge followed by a\n # thin white edged added around connected clusters.\n # To add an outline\n # three overlapping scatter plots are drawn:\n # First black dots with slightly larger size,\n # then, white dots a bit smaller, but still larger\n # than the final dots. 
Then the final dots are drawn\n # with some transparency.\n\n bg_width, gap_width = outline_width\n point = np.sqrt(size)\n gap_size = (point + (point * gap_width) * 2) ** 2\n bg_size = (np.sqrt(gap_size) + (point * bg_width) * 2) ** 2\n # the default black and white colors can be changes using\n # the contour_config parameter\n bg_color, gap_color = outline_color\n\n # remove edge from kwargs if present\n # because edge needs to be set to None\n kwargs['edgecolor'] = 'none'\n\n # remove alpha for outline\n alpha = kwargs.pop('alpha') if 'alpha' in kwargs else None\n\n ax.scatter(\n _data_points[:, 0],\n _data_points[:, 1],\n s=bg_size,\n marker=\".\",\n c=bg_color,\n rasterized=settings._vector_friendly,\n **kwargs,\n )\n ax.scatter(\n _data_points[:, 0],\n _data_points[:, 1],\n s=gap_size,\n marker=\".\",\n c=gap_color,\n rasterized=settings._vector_friendly,\n **kwargs,\n )\n # if user did not set alpha, set alpha to 0.7\n kwargs['alpha'] = 0.7 if alpha is None else alpha\n\n if groups:\n # first plot non-groups and then plot the\n # required groups on top\n\n in_groups = np.array(adata.obs[value_to_plot].isin(groups))\n\n if isinstance(size, np.ndarray):\n in_groups_size = size[in_groups]\n not_in_groups_size = size[~in_groups]\n elif img_key is not None:\n in_groups_size = not_in_groups_size = size_spot\n else:\n in_groups_size = not_in_groups_size = size\n\n # only show grey points if no image is below\n if library_id is None:\n ax.scatter(\n _data_points[~in_groups, 0],\n _data_points[~in_groups, 1],\n s=not_in_groups_size,\n marker=\".\",\n c=color_vector[~in_groups],\n rasterized=settings._vector_friendly,\n **kwargs,\n )\n cax = scatter(\n _data_points[in_groups, 0],\n _data_points[in_groups, 1],\n s=in_groups_size,\n marker=\".\",\n c=color_vector[in_groups],\n rasterized=settings._vector_friendly,\n **kwargs,\n )\n\n else:\n cax = scatter(\n _data_points[:, 0],\n _data_points[:, 1],\n marker=\".\",\n c=color_vector,\n rasterized=settings._vector_friendly,\n **kwargs,\n )\n\n # remove y and x ticks\n ax.set_yticks([])\n ax.set_xticks([])\n if projection == '3d':\n ax.set_zticks([])\n\n # set default axis_labels\n name = _basis2name(basis)\n if components is not None:\n axis_labels = [name + str(x + 1) for x in components_list[component_idx]]\n elif projection == '3d':\n axis_labels = [name + str(x + 1) for x in range(3)]\n\n else:\n axis_labels = [name + str(x + 1) for x in range(2)]\n\n ax.set_xlabel(axis_labels[0])\n ax.set_ylabel(axis_labels[1])\n if projection == '3d':\n # shift the label closer to the axis\n ax.set_zlabel(axis_labels[2], labelpad=-7)\n ax.autoscale_view()\n\n if edges:\n _utils.plot_edges(ax, adata, basis, edges_width, edges_color, neighbors_key)\n if arrows:\n _utils.plot_arrows(ax, adata, basis, arrows_kwds)\n\n if value_to_plot is None:\n # if only dots were plotted without an associated value\n # there is not need to plot a legend or a colorbar\n continue\n\n if legend_fontoutline is not None:\n path_effect = [\n patheffects.withStroke(linewidth=legend_fontoutline, foreground='w',)\n ]\n else:\n path_effect = None\n\n _add_legend_or_colorbar(\n adata,\n ax,\n cax,\n categorical,\n value_to_plot,\n legend_loc,\n _data_points,\n legend_fontweight,\n legend_fontsize,\n path_effect,\n groups,\n bool(grid),\n )\n\n if return_fig is True:\n return fig\n axs = axs if grid else ax\n _utils.savefig_or_show(basis, show=show, save=save)\n if show is False:\n return axs\n\n\ndef _panel_grid(hspace, wspace, ncols, num_panels):\n from matplotlib import gridspec\n\n 
n_panels_x = min(ncols, num_panels)\n n_panels_y = np.ceil(num_panels / n_panels_x).astype(int)\n # each panel will have the size of rcParams['figure.figsize']\n fig = pl.figure(\n figsize=(\n n_panels_x * rcParams['figure.figsize'][0] * (1 + wspace),\n n_panels_y * rcParams['figure.figsize'][1],\n ),\n )\n left = 0.2 / n_panels_x\n bottom = 0.13 / n_panels_y\n gs = gridspec.GridSpec(\n nrows=n_panels_y,\n ncols=n_panels_x,\n left=left,\n right=1 - (n_panels_x - 1) * left - 0.01 / n_panels_x,\n bottom=bottom,\n top=1 - (n_panels_y - 1) * bottom - 0.1 / n_panels_y,\n hspace=hspace,\n wspace=wspace,\n )\n return fig, gs\n\n\ndef _get_vmin_vmax(\n vmin: Sequence[VMinMax],\n vmax: Sequence[VMinMax],\n index: int,\n color_vector: Sequence[float],\n) -> Tuple[Union[float, None], Union[float, None]]:\n\n \"\"\"\n Evaluates the value of vmin and vmax, which could be a\n str in which case is interpreted as a percentile and should\n be specified in the form 'pN' where N is the percentile.\n Eg. for a percentile of 85 the format would be 'p85'.\n Floats are accepted as p99.9\n\n Alternatively, vmin/vmax could be a function that is applied to\n the list of color values (`color_vector`). E.g.\n\n def my_vmax(color_vector): np.percentile(color_vector, p=80)\n\n\n Parameters\n ----------\n index\n This index of the plot\n color_vector\n List or values for the plot\n\n Returns\n -------\n\n (vmin, vmax) containing None or float values\n\n \"\"\"\n out = []\n for v_name, v in [('vmin', vmin), ('vmax', vmax)]:\n if len(v) == 1:\n # this case usually happens when the user sets eg vmax=0.9, which\n # is internally converted into list of len=1, but is expected that this\n # value applies to all plots.\n v_value = v[0]\n else:\n try:\n v_value = v[index]\n except IndexError:\n logg.error(\n f\"The parameter {v_name} is not valid. If setting multiple {v_name} values,\"\n f\"check that the length of the {v_name} list is equal to the number \"\n \"of plots. \"\n )\n v_value = None\n\n if v_value is not None:\n if isinstance(v_value, str) and v_value.startswith('p'):\n try:\n float(v_value[1:])\n except ValueError:\n logg.error(\n f\"The parameter {v_name}={v_value} for plot number {index + 1} is not valid. \"\n f\"Please check the correct format for percentiles.\"\n )\n # interpret value of vmin/vmax as quantile with the following syntax 'p99.9'\n v_value = np.percentile(color_vector, q=float(v_value[1:]))\n elif callable(v_value):\n # interpret vmin/vmax as function\n v_value = v_value(color_vector)\n if not isinstance(v_value, float):\n logg.error(\n f\"The return of the function given for {v_name} is not valid. \"\n \"Please check that the function returns a number.\"\n )\n v_value = None\n else:\n try:\n float(v_value)\n except ValueError:\n logg.error(\n f\"The given {v_name}={v_value} for plot number {index + 1} is not valid. 
\"\n f\"Please check that the value given is a valid number, a string \"\n f\"starting with 'p' for percentiles or a valid function.\"\n )\n v_value = None\n out.append(v_value)\n return tuple(out)\n\n\ndef _wraps_plot_scatter(wrapper):\n annots_orig = {\n k: v for k, v in wrapper.__annotations__.items() if k not in {'adata', 'kwargs'}\n }\n annots_scatter = {\n k: v for k, v in embedding.__annotations__.items() if k != 'basis'\n }\n wrapper.__annotations__ = {**annots_scatter, **annots_orig}\n wrapper.__wrapped__ = embedding\n return wrapper\n\n\n# API\n\n\n@_wraps_plot_scatter\n@_doc_params(\n adata_color_etc=doc_adata_color_etc,\n edges_arrows=doc_edges_arrows,\n scatter_bulk=doc_scatter_embedding,\n show_save_ax=doc_show_save_ax,\n)\ndef umap(adata, **kwargs) -> Union[Axes, List[Axes], None]:\n \"\"\"\\\n Scatter plot in UMAP basis.\n\n Parameters\n ----------\n {adata_color_etc}\n {edges_arrows}\n {scatter_bulk}\n {show_save_ax}\n\n Returns\n -------\n If `show==False` a :class:`~matplotlib.axes.Axes` or a list of it.\n \"\"\"\n return embedding(adata, 'umap', **kwargs)\n\n\n@_wraps_plot_scatter\n@_doc_params(\n adata_color_etc=doc_adata_color_etc,\n edges_arrows=doc_edges_arrows,\n scatter_bulk=doc_scatter_embedding,\n show_save_ax=doc_show_save_ax,\n)\ndef tsne(adata, **kwargs) -> Union[Axes, List[Axes], None]:\n \"\"\"\\\n Scatter plot in tSNE basis.\n\n Parameters\n ----------\n {adata_color_etc}\n {edges_arrows}\n {scatter_bulk}\n {show_save_ax}\n\n Returns\n -------\n If `show==False` a :class:`~matplotlib.axes.Axes` or a list of it.\n \"\"\"\n return embedding(adata, 'tsne', **kwargs)\n\n\n@_wraps_plot_scatter\n@_doc_params(\n adata_color_etc=doc_adata_color_etc,\n scatter_bulk=doc_scatter_embedding,\n show_save_ax=doc_show_save_ax,\n)\ndef diffmap(adata, **kwargs) -> Union[Axes, List[Axes], None]:\n \"\"\"\\\n Scatter plot in Diffusion Map basis.\n\n Parameters\n ----------\n {adata_color_etc}\n {scatter_bulk}\n {show_save_ax}\n\n Returns\n -------\n If `show==False` a :class:`~matplotlib.axes.Axes` or a list of it.\n \"\"\"\n return embedding(adata, 'diffmap', **kwargs)\n\n\n@_wraps_plot_scatter\n@_doc_params(\n adata_color_etc=doc_adata_color_etc,\n edges_arrows=doc_edges_arrows,\n scatter_bulk=doc_scatter_embedding,\n show_save_ax=doc_show_save_ax,\n)\ndef draw_graph(\n adata: AnnData, layout: Optional[_IGraphLayout] = None, **kwargs,\n) -> Union[Axes, List[Axes], None]:\n \"\"\"\\\n Scatter plot in graph-drawing basis.\n\n Parameters\n ----------\n {adata_color_etc}\n layout\n One of the :func:`~scanpy.tl.draw_graph` layouts.\n By default, the last computed layout is used.\n {edges_arrows}\n {scatter_bulk}\n {show_save_ax}\n\n Returns\n -------\n If `show==False` a :class:`~matplotlib.axes.Axes` or a list of it.\n \"\"\"\n if layout is None:\n layout = str(adata.uns['draw_graph']['params']['layout'])\n basis = 'draw_graph_' + layout\n if 'X_' + basis not in adata.obsm_keys():\n raise ValueError(\n 'Did not find {} in adata.obs. 
Did you compute layout {}?'.format(\n 'draw_graph_' + layout, layout\n )\n )\n\n return embedding(adata, basis, **kwargs)\n\n\n@_wraps_plot_scatter\n@_doc_params(\n adata_color_etc=doc_adata_color_etc,\n scatter_bulk=doc_scatter_embedding,\n show_save_ax=doc_show_save_ax,\n)\ndef pca(adata, **kwargs) -> Union[Axes, List[Axes], None]:\n \"\"\"\\\n Scatter plot in PCA coordinates.\n\n Parameters\n ----------\n {adata_color_etc}\n {scatter_bulk}\n {show_save_ax}\n\n Returns\n -------\n If `show==False` a :class:`~matplotlib.axes.Axes` or a list of it.\n \"\"\"\n return embedding(adata, 'pca', **kwargs)\n\n\n@_wraps_plot_scatter\n@_doc_params(\n adata_color_etc=doc_adata_color_etc,\n scatter_bulk=doc_scatter_embedding,\n show_save_ax=doc_show_save_ax,\n)\ndef spatial(\n adata,\n *,\n img_key: Union[str, None, Empty] = _empty,\n library_id: Union[str, Empty] = _empty,\n crop_coord: Tuple[int, int, int, int] = None,\n alpha_img: float = 1.0,\n bw: bool = False,\n size: float = None,\n **kwargs,\n) -> Union[Axes, List[Axes], None]:\n \"\"\"\\\n Scatter plot in spatial coordinates.\n\n Use the parameter `img_key` to see the image in the background\n And the parameter `library_id` to select the image.\n By default, `'hires'` and `'lowres'` are attempted.\n Also by default the first entry of `library_id` is attempted.\n Use `crop_coord`, `alpha_img`, and `bw` to control how it is displayed.\n Use `size` to scale the size of the Visium spots plotted on top.\n\n Parameters\n ----------\n {adata_color_etc}\n {scatter_bulk}\n {show_save_ax}\n\n Returns\n -------\n If `show==False` a :class:`~matplotlib.axes.Axes` or a list of it.\n \"\"\"\n if library_id is _empty:\n library_id = next((i for i in adata.uns['spatial'].keys()))\n else:\n if library_id not in adata.uns['spatial'].keys():\n raise KeyError(\n f\"Could not find '{library_id}' in adata.uns['spatial'].keys().\\n\"\n f\"Available keys are: {list(adata.uns['spatial'].keys())}.\"\n )\n\n spatial_data = adata.uns['spatial'][library_id]\n if img_key is _empty:\n img_key = next(\n (k for k in ['hires', 'lowres'] if k in spatial_data['images']), None,\n )\n\n if img_key is None and size is None:\n size = 1.0\n\n return embedding(\n adata,\n 'spatial',\n img_key=img_key,\n crop_coord=crop_coord,\n alpha_img=alpha_img,\n bw=bw,\n library_id=library_id,\n size=size,\n **kwargs,\n )\n\n\n# Helpers\n\n\ndef _get_data_points(\n adata, basis, projection, components, img_key, library_id\n) -> Tuple[List[np.ndarray], List[Tuple[int, int]]]:\n \"\"\"\n Returns the data points corresponding to the selected basis, projection and/or components.\n\n Because multiple components are given (eg components=['1,2', '2,3'] the\n returned data are lists, containing each of the components. When only one component is plotted\n the list length is 1.\n\n Returns\n -------\n data_points\n Each entry is a numpy array containing the data points\n components\n The cleaned list of components. Eg. 
[(0,1)] or [(0,1), (1,2)]\n for components = [1,2] and components=['1,2', '2,3'] respectively\n \"\"\"\n\n if basis in adata.obsm.keys():\n basis_key = basis\n\n elif f\"X_{basis}\" in adata.obsm.keys():\n basis_key = f\"X_{basis}\"\n else:\n raise KeyError(\n f\"Could not find entry in `obsm` for '{basis}'.\\n\"\n f\"Available keys are: {list(adata.obsm.keys())}.\"\n )\n\n n_dims = 2\n if projection == '3d':\n # check if the data has a third dimension\n if adata.obsm[basis_key].shape[1] == 2:\n if settings._low_resolution_warning:\n logg.warning(\n 'Selected projections is \"3d\" but only two dimensions '\n 'are available. Only these two dimensions will be plotted'\n )\n else:\n n_dims = 3\n\n if components == 'all':\n from itertools import combinations\n\n r_value = 3 if projection == '3d' else 2\n _components_list = np.arange(adata.obsm[basis_key].shape[1]) + 1\n components = [\n \",\".join(map(str, x)) for x in combinations(_components_list, r=r_value)\n ]\n\n components_list = []\n offset = 0\n if basis == 'diffmap':\n offset = 1\n if components is not None:\n # components have different formats, either a list with integers, a string\n # or a list of strings.\n\n if isinstance(components, str):\n # eg: components='1,2'\n components_list.append(\n tuple(int(x.strip()) - 1 + offset for x in components.split(','))\n )\n\n elif isinstance(components, cabc.Sequence):\n if isinstance(components[0], int):\n # components=[1,2]\n components_list.append(tuple(int(x) - 1 + offset for x in components))\n else:\n # in this case, the components are str\n # eg: components=['1,2'] or components=['1,2', '2,3]\n # More than one component can be given and is stored\n # as a new item of components_list\n for comp in components:\n components_list.append(\n tuple(int(x.strip()) - 1 + offset for x in comp.split(','))\n )\n\n else:\n raise ValueError(\n \"Given components: '{}' are not valid. Please check. \"\n \"A valid example is `components='2,3'`\"\n )\n # check if the components are present in the data\n try:\n data_points = []\n for comp in components_list:\n data_points.append(adata.obsm[basis_key][:, comp])\n except:\n raise ValueError(\n \"Given components: '{}' are not valid. Please check. \"\n \"A valid example is `components='2,3'`\"\n )\n\n if basis == 'diffmap':\n # remove the offset added in the case of diffmap, such that\n # plot_scatter can print the labels correctly.\n components_list = [\n tuple(number - 1 for number in comp) for comp in components_list\n ]\n else:\n data_points = [np.array(adata.obsm[basis_key])[:, offset : offset + n_dims]]\n components_list = []\n\n if img_key is not None:\n spatial_data = adata.uns[\"spatial\"][library_id]\n if f\"tissue_{img_key}_scalef\" in spatial_data['scalefactors'].keys():\n scalef_key = f\"tissue_{img_key}_scalef\"\n data_points[0] = np.multiply(\n data_points[0], spatial_data['scalefactors'][scalef_key],\n )\n else:\n raise KeyError(\n f\"Could not find entry in `adata.uns[spatial][{library_id}]` for '{img_key}'.\\n\"\n f\"Available keys are: {list(spatial_data['images'].keys())}.\"\n )\n elif img_key is None and basis is \"spatial\":\n data_points[0][:, 1] = np.abs(\n np.subtract(data_points[0][:, 1], np.max(data_points[0][:, 1]))\n )\n\n return data_points, components_list\n\n\ndef _add_legend_or_colorbar(\n adata,\n ax,\n cax,\n categorical,\n value_to_plot,\n legend_loc,\n scatter_array,\n legend_fontweight,\n legend_fontsize,\n legend_fontoutline,\n groups,\n multi_panel,\n):\n \"\"\"\n Adds a color bar or a legend to the given ax. 
A legend is added when the\n data is categorical and a color bar is added when a continuous value was used.\n\n \"\"\"\n # add legends or colorbars\n if categorical is True:\n # add legend to figure\n categories = list(adata.obs[value_to_plot].cat.categories)\n colors = adata.uns[value_to_plot + '_colors']\n\n if multi_panel is True:\n # Shrink current axis by 10% to fit legend and match\n # size of plots that are not categorical\n box = ax.get_position()\n ax.set_position([box.x0, box.y0, box.width * 0.91, box.height])\n\n if groups is not None:\n # only label groups with the respective color\n colors = [colors[categories.index(x)] for x in groups]\n categories = groups\n\n if legend_loc == 'right margin':\n for idx, label in enumerate(categories):\n color = colors[idx]\n # use empty scatter to set labels\n ax.scatter([], [], c=color, label=label)\n ax.legend(\n frameon=False,\n loc='center left',\n bbox_to_anchor=(1, 0.5),\n ncol=(\n 1 if len(categories) <= 14 else 2 if len(categories) <= 30 else 3\n ),\n fontsize=legend_fontsize,\n )\n\n if legend_loc == 'on data':\n # identify centroids to put labels\n all_pos = np.zeros((len(categories), 2))\n for ilabel, label in enumerate(categories):\n _scatter = scatter_array[adata.obs[value_to_plot] == label, :]\n x_pos, y_pos = np.median(_scatter, axis=0)\n\n ax.text(\n x_pos,\n y_pos,\n label,\n weight=legend_fontweight,\n verticalalignment='center',\n horizontalalignment='center',\n fontsize=legend_fontsize,\n path_effects=legend_fontoutline,\n )\n\n all_pos[ilabel] = [x_pos, y_pos]\n # this is temporary storage for access by other tools\n _utils._tmp_cluster_pos = all_pos\n else:\n # add colorbar to figure\n pl.colorbar(cax, ax=ax, pad=0.01, fraction=0.08, aspect=30)\n\n\ndef _get_color_values(\n adata,\n value_to_plot,\n groups=None,\n palette: Union[str, Sequence[str], Cycler, None] = None,\n use_raw=False,\n gene_symbols=None,\n layer=None,\n) -> Tuple[Union[np.ndarray, str], bool]:\n \"\"\"\n Returns the value or color associated to each data point.\n For categorical data, the return value is list of colors taken\n from the category palette or from the given `palette` value.\n\n For non-categorical data, the values are returned\n\n Returns\n -------\n values\n Values to plot\n is_categorical\n Are the values categorical?\n \"\"\"\n if value_to_plot is None:\n return \"lightgray\", False\n if (\n gene_symbols is not None\n and value_to_plot not in adata.obs.columns\n and value_to_plot not in adata.var_names\n ):\n # We should probably just make an index for this, and share it over runs\n value_to_plot = adata.var.index[adata.var[gene_symbols] == value_to_plot][\n 0\n ] # TODO: Throw helpful error if this doesn't work\n if use_raw and value_to_plot not in adata.obs.columns:\n values = adata.raw.obs_vector(value_to_plot)\n else:\n values = adata.obs_vector(value_to_plot, layer=layer)\n\n ###\n # when plotting, the color of the dots is determined for each plot\n # the data is either categorical or continuous and the data could be in\n # 'obs' or in 'var'\n if not is_categorical_dtype(values):\n return values, False\n else: # is_categorical_dtype(values)\n color_key = f\"{value_to_plot}_colors\"\n if palette:\n _utils._set_colors_for_categorical_obs(adata, value_to_plot, palette)\n elif color_key not in adata.uns or len(adata.uns[color_key]) < len(\n values.categories\n ):\n # set a default palette in case that no colors or few colors are found\n _utils._set_default_colors_for_categorical_obs(adata, value_to_plot)\n else:\n 
_utils._validate_palette(adata, value_to_plot)\n\n color_vector = np.asarray(adata.uns[color_key])[values.codes]\n\n # Handle groups\n if groups:\n color_vector = np.fromiter(\n map(colors.to_hex, color_vector), '<U15', len(color_vector)\n )\n # set color to 'light gray' for all values\n # that are not in the groups\n color_vector[~adata.obs[value_to_plot].isin(groups)] = \"lightgray\"\n return color_vector, True\n\n\ndef _basis2name(basis):\n \"\"\"\n converts the 'basis' into the proper name.\n \"\"\"\n\n component_name = (\n 'DC'\n if basis == 'diffmap'\n else 'tSNE'\n if basis == 'tsne'\n else 'UMAP'\n if basis == 'umap'\n else 'PC'\n if basis == 'pca'\n else basis.replace('draw_graph_', '').upper()\n if 'draw_graph' in basis\n else basis\n )\n return component_name\n\n\ndef _process_image(\n adata, data_points, img_key, crop_coord, scale_spot, library_id, bw=False\n):\n offset = 100\n cmap_img = None\n spatial_data = adata.uns['spatial'][library_id]\n img = spatial_data['images'][img_key]\n scalef_key = f\"tissue_{img_key}_scalef\"\n\n # 0.5 needed for optimal matching with spot boundaries\n # checked with detected_tissue_image.png\n spot_size = (\n (\n spatial_data['scalefactors'][scalef_key]\n * spatial_data['scalefactors']['spot_diameter_fullres']\n )\n * 0.5\n * scale_spot\n )\n\n if crop_coord is not None:\n crop_coord = np.asarray(crop_coord)\n if len(crop_coord) != 4:\n raise ValueError(\"Invalid crop_coord of length {len(crop_coord)}(!=4)\")\n img_coord = (\n *crop_coord[:2],\n *np.ceil(img.shape[0] - crop_coord[2:4]).astype(int),\n )\n else:\n img_coord = [\n data_points[0][:, 0].min() - offset,\n data_points[0][:, 0].max() + offset,\n data_points[0][:, 1].min() - offset,\n data_points[0][:, 1].max() + offset,\n ]\n\n if bw:\n img = np.dot(img[..., :3], [0.2989, 0.5870, 0.1140])\n cmap_img = \"gray\"\n\n return img, img_coord, spot_size, cmap_img\n",
"path": "scanpy/plotting/_tools/scatterplots.py"
}
] | [
{
"content": "import collections.abc as cabc\nfrom typing import Union, Optional, Sequence, Any, Mapping, List, Tuple, Callable\n\nimport numpy as np\nfrom anndata import AnnData\nfrom cycler import Cycler\nfrom matplotlib.axes import Axes\nfrom matplotlib.figure import Figure\nfrom pandas.api.types import is_categorical_dtype\nfrom matplotlib import pyplot as pl, colors\nfrom matplotlib import rcParams\nfrom matplotlib import patheffects\nfrom matplotlib.colors import Colormap\nfrom functools import partial\n\nfrom .. import _utils\nfrom .._utils import (\n _IGraphLayout,\n _FontWeight,\n _FontSize,\n circles,\n make_projection_available,\n)\nfrom .._docs import (\n doc_adata_color_etc,\n doc_edges_arrows,\n doc_scatter_embedding,\n doc_show_save_ax,\n)\nfrom ... import logging as logg\nfrom ..._settings import settings\nfrom ..._utils import sanitize_anndata, _doc_params, Empty, _empty\nfrom ..._compat import Literal\n\nVMinMax = Union[str, float, Callable[[Sequence[float]], float]]\n\n\n@_doc_params(\n adata_color_etc=doc_adata_color_etc,\n edges_arrows=doc_edges_arrows,\n scatter_bulk=doc_scatter_embedding,\n show_save_ax=doc_show_save_ax,\n)\ndef embedding(\n adata: AnnData,\n basis: str,\n *,\n color: Union[str, Sequence[str], None] = None,\n gene_symbols: Optional[str] = None,\n use_raw: Optional[bool] = None,\n sort_order: bool = True,\n edges: bool = False,\n edges_width: float = 0.1,\n edges_color: Union[str, Sequence[float], Sequence[str]] = 'grey',\n neighbors_key: Optional[str] = None,\n arrows: bool = False,\n arrows_kwds: Optional[Mapping[str, Any]] = None,\n groups: Optional[str] = None,\n components: Union[str, Sequence[str]] = None,\n layer: Optional[str] = None,\n projection: Literal['2d', '3d'] = '2d',\n # image parameters\n img_key: Optional[str] = None,\n crop_coord: Tuple[int, int, int, int] = None,\n alpha_img: float = 1.0,\n bw: bool = False,\n library_id: str = None,\n #\n color_map: Union[Colormap, str, None] = None,\n palette: Union[str, Sequence[str], Cycler, None] = None,\n size: Union[float, Sequence[float], None] = None,\n frameon: Optional[bool] = None,\n legend_fontsize: Union[int, float, _FontSize, None] = None,\n legend_fontweight: Union[int, _FontWeight] = 'bold',\n legend_loc: str = 'right margin',\n legend_fontoutline: Optional[int] = None,\n vmax: Union[VMinMax, Sequence[VMinMax], None] = None,\n vmin: Union[VMinMax, Sequence[VMinMax], None] = None,\n add_outline: Optional[bool] = False,\n outline_width: Tuple[float, float] = (0.3, 0.05),\n outline_color: Tuple[str, str] = ('black', 'white'),\n ncols: int = 4,\n hspace: float = 0.25,\n wspace: Optional[float] = None,\n title: Union[str, Sequence[str], None] = None,\n show: Optional[bool] = None,\n save: Union[bool, str, None] = None,\n ax: Optional[Axes] = None,\n return_fig: Optional[bool] = None,\n **kwargs,\n) -> Union[Figure, Axes, None]:\n \"\"\"\\\n Scatter plot for user specified embedding basis (e.g. umap, pca, etc)\n\n Parameters\n ----------\n basis\n Name of the `obsm` basis to use.\n {adata_color_etc}\n {edges_arrows}\n {scatter_bulk}\n {show_save_ax}\n\n Returns\n -------\n If `show==False` a :class:`~matplotlib.axes.Axes` or a list of it.\n \"\"\"\n\n sanitize_anndata(adata)\n if color_map is not None:\n kwargs['cmap'] = color_map\n if size is not None:\n kwargs['s'] = size\n if 'edgecolor' not in kwargs:\n # by default turn off edge color. 
Otherwise, for\n # very small sizes the edge will not reduce its size\n # (https://github.com/theislab/scanpy/issues/293)\n kwargs['edgecolor'] = 'none'\n\n if groups:\n if isinstance(groups, str):\n groups = [groups]\n\n make_projection_available(projection)\n args_3d = dict(projection='3d') if projection == '3d' else {}\n\n # Deal with Raw\n if use_raw is None:\n # check if adata.raw is set\n use_raw = layer is None and adata.raw is not None\n if use_raw and layer is not None:\n raise ValueError(\n \"Cannot use both a layer and the raw representation. Was passed:\"\n f\"use_raw={use_raw}, layer={layer}.\"\n )\n\n if wspace is None:\n # try to set a wspace that is not too large or too small given the\n # current figure size\n wspace = 0.75 / rcParams['figure.figsize'][0] + 0.02\n if adata.raw is None and use_raw:\n raise ValueError(\n \"`use_raw` is set to True but AnnData object does not have raw. \"\n \"Please check.\"\n )\n # turn color into a python list\n color = [color] if isinstance(color, str) or color is None else list(color)\n if title is not None:\n # turn title into a python list if not None\n title = [title] if isinstance(title, str) else list(title)\n\n # get the points position and the components list\n # (only if components is not None)\n data_points, components_list = _get_data_points(\n adata, basis, projection, components, img_key, library_id\n )\n\n # Setup layout.\n # Most of the code is for the case when multiple plots are required\n # 'color' is a list of names that want to be plotted.\n # Eg. ['Gene1', 'louvain', 'Gene2'].\n # component_list is a list of components [[0,1], [1,2]]\n if (\n not isinstance(color, str)\n and isinstance(color, cabc.Sequence)\n and len(color) > 1\n ) or len(components_list) > 1:\n if ax is not None:\n raise ValueError(\n \"Cannot specify `ax` when plotting multiple panels \"\n \"(each for a given value of 'color').\"\n )\n if len(components_list) == 0:\n components_list = [None]\n\n # each plot needs to be its own panel\n num_panels = len(color) * len(components_list)\n fig, grid = _panel_grid(hspace, wspace, ncols, num_panels)\n else:\n if len(components_list) == 0:\n components_list = [None]\n grid = None\n if ax is None:\n fig = pl.figure()\n ax = fig.add_subplot(111, **args_3d)\n\n # turn vmax and vmin into a sequence\n if isinstance(vmax, str) or not isinstance(vmax, cabc.Sequence):\n vmax = [vmax]\n if isinstance(vmin, str) or not isinstance(vmin, cabc.Sequence):\n vmin = [vmin]\n\n if 's' in kwargs:\n size = kwargs.pop('s')\n\n if size is not None:\n # check if size is any type of sequence, and if so\n # set as ndarray\n import pandas.core.series\n\n if (\n size is not None\n and isinstance(\n size, (cabc.Sequence, pandas.core.series.Series, np.ndarray,)\n )\n and len(size) == adata.shape[0]\n ):\n size = np.array(size, dtype=float)\n else:\n size = 120000 / adata.shape[0]\n\n ###\n # make the plots\n axs = []\n import itertools\n\n idx_components = range(len(components_list))\n\n # use itertools.product to make a plot for each color and for each component\n # For example if color=[gene1, gene2] and components=['1,2, '2,3'].\n # The plots are: [\n # color=gene1, components=[1,2], color=gene1, components=[2,3],\n # color=gene2, components = [1, 2], color=gene2, components=[2,3],\n # ]\n for count, (value_to_plot, component_idx) in enumerate(\n itertools.product(color, idx_components)\n ):\n color_vector, categorical = _get_color_values(\n adata,\n value_to_plot,\n layer=layer,\n groups=groups,\n palette=palette,\n 
use_raw=use_raw,\n gene_symbols=gene_symbols,\n )\n\n # check if higher value points should be plot on top\n if sort_order is True and value_to_plot is not None and categorical is False:\n order = np.argsort(color_vector)\n color_vector = color_vector[order]\n _data_points = data_points[component_idx][order, :]\n\n # check if 'size' is given (stored in kwargs['s']\n # and reorder it.\n if isinstance(size, np.ndarray):\n size = np.array(size)[order]\n else:\n _data_points = data_points[component_idx]\n\n # if plotting multiple panels, get the ax from the grid spec\n # else use the ax value (either user given or created previously)\n if grid:\n ax = pl.subplot(grid[count], **args_3d)\n axs.append(ax)\n if not (settings._frameon if frameon is None else frameon):\n ax.axis('off')\n if title is None:\n if value_to_plot is not None:\n ax.set_title(value_to_plot)\n else:\n ax.set_title('')\n else:\n try:\n ax.set_title(title[count])\n except IndexError:\n logg.warning(\n \"The title list is shorter than the number of panels. \"\n \"Using 'color' value instead for some plots.\"\n )\n ax.set_title(value_to_plot)\n\n # check vmin and vmax options\n if categorical:\n kwargs['vmin'] = kwargs['vmax'] = None\n else:\n kwargs['vmin'], kwargs['vmax'] = _get_vmin_vmax(\n vmin, vmax, count, color_vector\n )\n\n # make the scatter plot\n if projection == '3d':\n cax = ax.scatter(\n _data_points[:, 0],\n _data_points[:, 1],\n _data_points[:, 2],\n marker=\".\",\n c=color_vector,\n rasterized=settings._vector_friendly,\n **kwargs,\n )\n else:\n if img_key is not None:\n # had to return size_spot cause spot size is set according\n # to the image to be plotted\n img_processed, img_coord, size_spot, cmap_img = _process_image(\n adata, data_points, img_key, crop_coord, size, library_id, bw\n )\n ax.imshow(img_processed, cmap=cmap_img, alpha=alpha_img)\n ax.set_xlim(img_coord[0], img_coord[1])\n ax.set_ylim(img_coord[3], img_coord[2])\n elif img_key is None and library_id is not None:\n # order of magnitude similar to public visium\n size_spot = 70 * size\n\n scatter = (\n partial(ax.scatter, s=size)\n if library_id is None\n else partial(circles, s=size_spot, ax=ax)\n )\n\n if add_outline:\n # the default outline is a black edge followed by a\n # thin white edged added around connected clusters.\n # To add an outline\n # three overlapping scatter plots are drawn:\n # First black dots with slightly larger size,\n # then, white dots a bit smaller, but still larger\n # than the final dots. 
Then the final dots are drawn\n # with some transparency.\n\n bg_width, gap_width = outline_width\n point = np.sqrt(size)\n gap_size = (point + (point * gap_width) * 2) ** 2\n bg_size = (np.sqrt(gap_size) + (point * bg_width) * 2) ** 2\n # the default black and white colors can be changes using\n # the contour_config parameter\n bg_color, gap_color = outline_color\n\n # remove edge from kwargs if present\n # because edge needs to be set to None\n kwargs['edgecolor'] = 'none'\n\n # remove alpha for outline\n alpha = kwargs.pop('alpha') if 'alpha' in kwargs else None\n\n ax.scatter(\n _data_points[:, 0],\n _data_points[:, 1],\n s=bg_size,\n marker=\".\",\n c=bg_color,\n rasterized=settings._vector_friendly,\n **kwargs,\n )\n ax.scatter(\n _data_points[:, 0],\n _data_points[:, 1],\n s=gap_size,\n marker=\".\",\n c=gap_color,\n rasterized=settings._vector_friendly,\n **kwargs,\n )\n # if user did not set alpha, set alpha to 0.7\n kwargs['alpha'] = 0.7 if alpha is None else alpha\n\n if groups:\n # first plot non-groups and then plot the\n # required groups on top\n\n in_groups = np.array(adata.obs[value_to_plot].isin(groups))\n\n if isinstance(size, np.ndarray):\n in_groups_size = size[in_groups]\n not_in_groups_size = size[~in_groups]\n elif img_key is not None:\n in_groups_size = not_in_groups_size = size_spot\n else:\n in_groups_size = not_in_groups_size = size\n\n # only show grey points if no image is below\n if library_id is None:\n ax.scatter(\n _data_points[~in_groups, 0],\n _data_points[~in_groups, 1],\n s=not_in_groups_size,\n marker=\".\",\n c=color_vector[~in_groups],\n rasterized=settings._vector_friendly,\n **kwargs,\n )\n cax = scatter(\n _data_points[in_groups, 0],\n _data_points[in_groups, 1],\n s=in_groups_size,\n marker=\".\",\n c=color_vector[in_groups],\n rasterized=settings._vector_friendly,\n **kwargs,\n )\n\n else:\n cax = scatter(\n _data_points[:, 0],\n _data_points[:, 1],\n marker=\".\",\n c=color_vector,\n rasterized=settings._vector_friendly,\n **kwargs,\n )\n\n # remove y and x ticks\n ax.set_yticks([])\n ax.set_xticks([])\n if projection == '3d':\n ax.set_zticks([])\n\n # set default axis_labels\n name = _basis2name(basis)\n if components is not None:\n axis_labels = [name + str(x + 1) for x in components_list[component_idx]]\n elif projection == '3d':\n axis_labels = [name + str(x + 1) for x in range(3)]\n\n else:\n axis_labels = [name + str(x + 1) for x in range(2)]\n\n ax.set_xlabel(axis_labels[0])\n ax.set_ylabel(axis_labels[1])\n if projection == '3d':\n # shift the label closer to the axis\n ax.set_zlabel(axis_labels[2], labelpad=-7)\n ax.autoscale_view()\n\n if edges:\n _utils.plot_edges(ax, adata, basis, edges_width, edges_color, neighbors_key)\n if arrows:\n _utils.plot_arrows(ax, adata, basis, arrows_kwds)\n\n if value_to_plot is None:\n # if only dots were plotted without an associated value\n # there is not need to plot a legend or a colorbar\n continue\n\n if legend_fontoutline is not None:\n path_effect = [\n patheffects.withStroke(linewidth=legend_fontoutline, foreground='w',)\n ]\n else:\n path_effect = None\n\n _add_legend_or_colorbar(\n adata,\n ax,\n cax,\n categorical,\n value_to_plot,\n legend_loc,\n _data_points,\n legend_fontweight,\n legend_fontsize,\n path_effect,\n groups,\n bool(grid),\n )\n\n if return_fig is True:\n return fig\n axs = axs if grid else ax\n _utils.savefig_or_show(basis, show=show, save=save)\n if show is False:\n return axs\n\n\ndef _panel_grid(hspace, wspace, ncols, num_panels):\n from matplotlib import gridspec\n\n 
n_panels_x = min(ncols, num_panels)\n n_panels_y = np.ceil(num_panels / n_panels_x).astype(int)\n # each panel will have the size of rcParams['figure.figsize']\n fig = pl.figure(\n figsize=(\n n_panels_x * rcParams['figure.figsize'][0] * (1 + wspace),\n n_panels_y * rcParams['figure.figsize'][1],\n ),\n )\n left = 0.2 / n_panels_x\n bottom = 0.13 / n_panels_y\n gs = gridspec.GridSpec(\n nrows=n_panels_y,\n ncols=n_panels_x,\n left=left,\n right=1 - (n_panels_x - 1) * left - 0.01 / n_panels_x,\n bottom=bottom,\n top=1 - (n_panels_y - 1) * bottom - 0.1 / n_panels_y,\n hspace=hspace,\n wspace=wspace,\n )\n return fig, gs\n\n\ndef _get_vmin_vmax(\n vmin: Sequence[VMinMax],\n vmax: Sequence[VMinMax],\n index: int,\n color_vector: Sequence[float],\n) -> Tuple[Union[float, None], Union[float, None]]:\n\n \"\"\"\n Evaluates the value of vmin and vmax, which could be a\n str in which case is interpreted as a percentile and should\n be specified in the form 'pN' where N is the percentile.\n Eg. for a percentile of 85 the format would be 'p85'.\n Floats are accepted as p99.9\n\n Alternatively, vmin/vmax could be a function that is applied to\n the list of color values (`color_vector`). E.g.\n\n def my_vmax(color_vector): np.percentile(color_vector, p=80)\n\n\n Parameters\n ----------\n index\n This index of the plot\n color_vector\n List or values for the plot\n\n Returns\n -------\n\n (vmin, vmax) containing None or float values\n\n \"\"\"\n out = []\n for v_name, v in [('vmin', vmin), ('vmax', vmax)]:\n if len(v) == 1:\n # this case usually happens when the user sets eg vmax=0.9, which\n # is internally converted into list of len=1, but is expected that this\n # value applies to all plots.\n v_value = v[0]\n else:\n try:\n v_value = v[index]\n except IndexError:\n logg.error(\n f\"The parameter {v_name} is not valid. If setting multiple {v_name} values,\"\n f\"check that the length of the {v_name} list is equal to the number \"\n \"of plots. \"\n )\n v_value = None\n\n if v_value is not None:\n if isinstance(v_value, str) and v_value.startswith('p'):\n try:\n float(v_value[1:])\n except ValueError:\n logg.error(\n f\"The parameter {v_name}={v_value} for plot number {index + 1} is not valid. \"\n f\"Please check the correct format for percentiles.\"\n )\n # interpret value of vmin/vmax as quantile with the following syntax 'p99.9'\n v_value = np.percentile(color_vector, q=float(v_value[1:]))\n elif callable(v_value):\n # interpret vmin/vmax as function\n v_value = v_value(color_vector)\n if not isinstance(v_value, float):\n logg.error(\n f\"The return of the function given for {v_name} is not valid. \"\n \"Please check that the function returns a number.\"\n )\n v_value = None\n else:\n try:\n float(v_value)\n except ValueError:\n logg.error(\n f\"The given {v_name}={v_value} for plot number {index + 1} is not valid. 
\"\n f\"Please check that the value given is a valid number, a string \"\n f\"starting with 'p' for percentiles or a valid function.\"\n )\n v_value = None\n out.append(v_value)\n return tuple(out)\n\n\ndef _wraps_plot_scatter(wrapper):\n annots_orig = {\n k: v for k, v in wrapper.__annotations__.items() if k not in {'adata', 'kwargs'}\n }\n annots_scatter = {\n k: v for k, v in embedding.__annotations__.items() if k != 'basis'\n }\n wrapper.__annotations__ = {**annots_scatter, **annots_orig}\n wrapper.__wrapped__ = embedding\n return wrapper\n\n\n# API\n\n\n@_wraps_plot_scatter\n@_doc_params(\n adata_color_etc=doc_adata_color_etc,\n edges_arrows=doc_edges_arrows,\n scatter_bulk=doc_scatter_embedding,\n show_save_ax=doc_show_save_ax,\n)\ndef umap(adata, **kwargs) -> Union[Axes, List[Axes], None]:\n \"\"\"\\\n Scatter plot in UMAP basis.\n\n Parameters\n ----------\n {adata_color_etc}\n {edges_arrows}\n {scatter_bulk}\n {show_save_ax}\n\n Returns\n -------\n If `show==False` a :class:`~matplotlib.axes.Axes` or a list of it.\n \"\"\"\n return embedding(adata, 'umap', **kwargs)\n\n\n@_wraps_plot_scatter\n@_doc_params(\n adata_color_etc=doc_adata_color_etc,\n edges_arrows=doc_edges_arrows,\n scatter_bulk=doc_scatter_embedding,\n show_save_ax=doc_show_save_ax,\n)\ndef tsne(adata, **kwargs) -> Union[Axes, List[Axes], None]:\n \"\"\"\\\n Scatter plot in tSNE basis.\n\n Parameters\n ----------\n {adata_color_etc}\n {edges_arrows}\n {scatter_bulk}\n {show_save_ax}\n\n Returns\n -------\n If `show==False` a :class:`~matplotlib.axes.Axes` or a list of it.\n \"\"\"\n return embedding(adata, 'tsne', **kwargs)\n\n\n@_wraps_plot_scatter\n@_doc_params(\n adata_color_etc=doc_adata_color_etc,\n scatter_bulk=doc_scatter_embedding,\n show_save_ax=doc_show_save_ax,\n)\ndef diffmap(adata, **kwargs) -> Union[Axes, List[Axes], None]:\n \"\"\"\\\n Scatter plot in Diffusion Map basis.\n\n Parameters\n ----------\n {adata_color_etc}\n {scatter_bulk}\n {show_save_ax}\n\n Returns\n -------\n If `show==False` a :class:`~matplotlib.axes.Axes` or a list of it.\n \"\"\"\n return embedding(adata, 'diffmap', **kwargs)\n\n\n@_wraps_plot_scatter\n@_doc_params(\n adata_color_etc=doc_adata_color_etc,\n edges_arrows=doc_edges_arrows,\n scatter_bulk=doc_scatter_embedding,\n show_save_ax=doc_show_save_ax,\n)\ndef draw_graph(\n adata: AnnData, layout: Optional[_IGraphLayout] = None, **kwargs,\n) -> Union[Axes, List[Axes], None]:\n \"\"\"\\\n Scatter plot in graph-drawing basis.\n\n Parameters\n ----------\n {adata_color_etc}\n layout\n One of the :func:`~scanpy.tl.draw_graph` layouts.\n By default, the last computed layout is used.\n {edges_arrows}\n {scatter_bulk}\n {show_save_ax}\n\n Returns\n -------\n If `show==False` a :class:`~matplotlib.axes.Axes` or a list of it.\n \"\"\"\n if layout is None:\n layout = str(adata.uns['draw_graph']['params']['layout'])\n basis = 'draw_graph_' + layout\n if 'X_' + basis not in adata.obsm_keys():\n raise ValueError(\n 'Did not find {} in adata.obs. 
Did you compute layout {}?'.format(\n 'draw_graph_' + layout, layout\n )\n )\n\n return embedding(adata, basis, **kwargs)\n\n\n@_wraps_plot_scatter\n@_doc_params(\n adata_color_etc=doc_adata_color_etc,\n scatter_bulk=doc_scatter_embedding,\n show_save_ax=doc_show_save_ax,\n)\ndef pca(adata, **kwargs) -> Union[Axes, List[Axes], None]:\n \"\"\"\\\n Scatter plot in PCA coordinates.\n\n Parameters\n ----------\n {adata_color_etc}\n {scatter_bulk}\n {show_save_ax}\n\n Returns\n -------\n If `show==False` a :class:`~matplotlib.axes.Axes` or a list of it.\n \"\"\"\n return embedding(adata, 'pca', **kwargs)\n\n\n@_wraps_plot_scatter\n@_doc_params(\n adata_color_etc=doc_adata_color_etc,\n scatter_bulk=doc_scatter_embedding,\n show_save_ax=doc_show_save_ax,\n)\ndef spatial(\n adata,\n *,\n img_key: Union[str, None, Empty] = _empty,\n library_id: Union[str, Empty] = _empty,\n crop_coord: Tuple[int, int, int, int] = None,\n alpha_img: float = 1.0,\n bw: bool = False,\n size: float = None,\n **kwargs,\n) -> Union[Axes, List[Axes], None]:\n \"\"\"\\\n Scatter plot in spatial coordinates.\n\n Use the parameter `img_key` to see the image in the background\n And the parameter `library_id` to select the image.\n By default, `'hires'` and `'lowres'` are attempted.\n Also by default the first entry of `library_id` is attempted.\n Use `crop_coord`, `alpha_img`, and `bw` to control how it is displayed.\n Use `size` to scale the size of the Visium spots plotted on top.\n\n Parameters\n ----------\n {adata_color_etc}\n {scatter_bulk}\n {show_save_ax}\n\n Returns\n -------\n If `show==False` a :class:`~matplotlib.axes.Axes` or a list of it.\n \"\"\"\n if library_id is _empty:\n library_id = next((i for i in adata.uns['spatial'].keys()))\n else:\n if library_id not in adata.uns['spatial'].keys():\n raise KeyError(\n f\"Could not find '{library_id}' in adata.uns['spatial'].keys().\\n\"\n f\"Available keys are: {list(adata.uns['spatial'].keys())}.\"\n )\n\n spatial_data = adata.uns['spatial'][library_id]\n if img_key is _empty:\n img_key = next(\n (k for k in ['hires', 'lowres'] if k in spatial_data['images']), None,\n )\n\n if size is None:\n size = 1.0\n\n return embedding(\n adata,\n 'spatial',\n img_key=img_key,\n crop_coord=crop_coord,\n alpha_img=alpha_img,\n bw=bw,\n library_id=library_id,\n size=size,\n **kwargs,\n )\n\n\n# Helpers\n\n\ndef _get_data_points(\n adata, basis, projection, components, img_key, library_id\n) -> Tuple[List[np.ndarray], List[Tuple[int, int]]]:\n \"\"\"\n Returns the data points corresponding to the selected basis, projection and/or components.\n\n Because multiple components are given (eg components=['1,2', '2,3'] the\n returned data are lists, containing each of the components. When only one component is plotted\n the list length is 1.\n\n Returns\n -------\n data_points\n Each entry is a numpy array containing the data points\n components\n The cleaned list of components. Eg. 
[(0,1)] or [(0,1), (1,2)]\n for components = [1,2] and components=['1,2', '2,3'] respectively\n \"\"\"\n\n if basis in adata.obsm.keys():\n basis_key = basis\n\n elif f\"X_{basis}\" in adata.obsm.keys():\n basis_key = f\"X_{basis}\"\n else:\n raise KeyError(\n f\"Could not find entry in `obsm` for '{basis}'.\\n\"\n f\"Available keys are: {list(adata.obsm.keys())}.\"\n )\n\n n_dims = 2\n if projection == '3d':\n # check if the data has a third dimension\n if adata.obsm[basis_key].shape[1] == 2:\n if settings._low_resolution_warning:\n logg.warning(\n 'Selected projections is \"3d\" but only two dimensions '\n 'are available. Only these two dimensions will be plotted'\n )\n else:\n n_dims = 3\n\n if components == 'all':\n from itertools import combinations\n\n r_value = 3 if projection == '3d' else 2\n _components_list = np.arange(adata.obsm[basis_key].shape[1]) + 1\n components = [\n \",\".join(map(str, x)) for x in combinations(_components_list, r=r_value)\n ]\n\n components_list = []\n offset = 0\n if basis == 'diffmap':\n offset = 1\n if components is not None:\n # components have different formats, either a list with integers, a string\n # or a list of strings.\n\n if isinstance(components, str):\n # eg: components='1,2'\n components_list.append(\n tuple(int(x.strip()) - 1 + offset for x in components.split(','))\n )\n\n elif isinstance(components, cabc.Sequence):\n if isinstance(components[0], int):\n # components=[1,2]\n components_list.append(tuple(int(x) - 1 + offset for x in components))\n else:\n # in this case, the components are str\n # eg: components=['1,2'] or components=['1,2', '2,3]\n # More than one component can be given and is stored\n # as a new item of components_list\n for comp in components:\n components_list.append(\n tuple(int(x.strip()) - 1 + offset for x in comp.split(','))\n )\n\n else:\n raise ValueError(\n \"Given components: '{}' are not valid. Please check. \"\n \"A valid example is `components='2,3'`\"\n )\n # check if the components are present in the data\n try:\n data_points = []\n for comp in components_list:\n data_points.append(adata.obsm[basis_key][:, comp])\n except:\n raise ValueError(\n \"Given components: '{}' are not valid. Please check. \"\n \"A valid example is `components='2,3'`\"\n )\n\n if basis == 'diffmap':\n # remove the offset added in the case of diffmap, such that\n # plot_scatter can print the labels correctly.\n components_list = [\n tuple(number - 1 for number in comp) for comp in components_list\n ]\n else:\n data_points = [np.array(adata.obsm[basis_key])[:, offset : offset + n_dims]]\n components_list = []\n\n if img_key is not None:\n spatial_data = adata.uns[\"spatial\"][library_id]\n if f\"tissue_{img_key}_scalef\" in spatial_data['scalefactors'].keys():\n scalef_key = f\"tissue_{img_key}_scalef\"\n data_points[0] = np.multiply(\n data_points[0], spatial_data['scalefactors'][scalef_key],\n )\n else:\n raise KeyError(\n f\"Could not find entry in `adata.uns[spatial][{library_id}]` for '{img_key}'.\\n\"\n f\"Available keys are: {list(spatial_data['images'].keys())}.\"\n )\n elif img_key is None and basis is \"spatial\":\n data_points[0][:, 1] = np.abs(\n np.subtract(data_points[0][:, 1], np.max(data_points[0][:, 1]))\n )\n\n return data_points, components_list\n\n\ndef _add_legend_or_colorbar(\n adata,\n ax,\n cax,\n categorical,\n value_to_plot,\n legend_loc,\n scatter_array,\n legend_fontweight,\n legend_fontsize,\n legend_fontoutline,\n groups,\n multi_panel,\n):\n \"\"\"\n Adds a color bar or a legend to the given ax. 
A legend is added when the\n data is categorical and a color bar is added when a continuous value was used.\n\n \"\"\"\n # add legends or colorbars\n if categorical is True:\n # add legend to figure\n categories = list(adata.obs[value_to_plot].cat.categories)\n colors = adata.uns[value_to_plot + '_colors']\n\n if multi_panel is True:\n # Shrink current axis by 10% to fit legend and match\n # size of plots that are not categorical\n box = ax.get_position()\n ax.set_position([box.x0, box.y0, box.width * 0.91, box.height])\n\n if groups is not None:\n # only label groups with the respective color\n colors = [colors[categories.index(x)] for x in groups]\n categories = groups\n\n if legend_loc == 'right margin':\n for idx, label in enumerate(categories):\n color = colors[idx]\n # use empty scatter to set labels\n ax.scatter([], [], c=color, label=label)\n ax.legend(\n frameon=False,\n loc='center left',\n bbox_to_anchor=(1, 0.5),\n ncol=(\n 1 if len(categories) <= 14 else 2 if len(categories) <= 30 else 3\n ),\n fontsize=legend_fontsize,\n )\n\n if legend_loc == 'on data':\n # identify centroids to put labels\n all_pos = np.zeros((len(categories), 2))\n for ilabel, label in enumerate(categories):\n _scatter = scatter_array[adata.obs[value_to_plot] == label, :]\n x_pos, y_pos = np.median(_scatter, axis=0)\n\n ax.text(\n x_pos,\n y_pos,\n label,\n weight=legend_fontweight,\n verticalalignment='center',\n horizontalalignment='center',\n fontsize=legend_fontsize,\n path_effects=legend_fontoutline,\n )\n\n all_pos[ilabel] = [x_pos, y_pos]\n # this is temporary storage for access by other tools\n _utils._tmp_cluster_pos = all_pos\n else:\n # add colorbar to figure\n pl.colorbar(cax, ax=ax, pad=0.01, fraction=0.08, aspect=30)\n\n\ndef _get_color_values(\n adata,\n value_to_plot,\n groups=None,\n palette: Union[str, Sequence[str], Cycler, None] = None,\n use_raw=False,\n gene_symbols=None,\n layer=None,\n) -> Tuple[Union[np.ndarray, str], bool]:\n \"\"\"\n Returns the value or color associated to each data point.\n For categorical data, the return value is list of colors taken\n from the category palette or from the given `palette` value.\n\n For non-categorical data, the values are returned\n\n Returns\n -------\n values\n Values to plot\n is_categorical\n Are the values categorical?\n \"\"\"\n if value_to_plot is None:\n return \"lightgray\", False\n if (\n gene_symbols is not None\n and value_to_plot not in adata.obs.columns\n and value_to_plot not in adata.var_names\n ):\n # We should probably just make an index for this, and share it over runs\n value_to_plot = adata.var.index[adata.var[gene_symbols] == value_to_plot][\n 0\n ] # TODO: Throw helpful error if this doesn't work\n if use_raw and value_to_plot not in adata.obs.columns:\n values = adata.raw.obs_vector(value_to_plot)\n else:\n values = adata.obs_vector(value_to_plot, layer=layer)\n\n ###\n # when plotting, the color of the dots is determined for each plot\n # the data is either categorical or continuous and the data could be in\n # 'obs' or in 'var'\n if not is_categorical_dtype(values):\n return values, False\n else: # is_categorical_dtype(values)\n color_key = f\"{value_to_plot}_colors\"\n if palette:\n _utils._set_colors_for_categorical_obs(adata, value_to_plot, palette)\n elif color_key not in adata.uns or len(adata.uns[color_key]) < len(\n values.categories\n ):\n # set a default palette in case that no colors or few colors are found\n _utils._set_default_colors_for_categorical_obs(adata, value_to_plot)\n else:\n 
_utils._validate_palette(adata, value_to_plot)\n\n color_vector = np.asarray(adata.uns[color_key])[values.codes]\n\n # Handle groups\n if groups:\n color_vector = np.fromiter(\n map(colors.to_hex, color_vector), '<U15', len(color_vector)\n )\n # set color to 'light gray' for all values\n # that are not in the groups\n color_vector[~adata.obs[value_to_plot].isin(groups)] = \"lightgray\"\n return color_vector, True\n\n\ndef _basis2name(basis):\n \"\"\"\n converts the 'basis' into the proper name.\n \"\"\"\n\n component_name = (\n 'DC'\n if basis == 'diffmap'\n else 'tSNE'\n if basis == 'tsne'\n else 'UMAP'\n if basis == 'umap'\n else 'PC'\n if basis == 'pca'\n else basis.replace('draw_graph_', '').upper()\n if 'draw_graph' in basis\n else basis\n )\n return component_name\n\n\ndef _process_image(\n adata, data_points, img_key, crop_coord, scale_spot, library_id, bw=False\n):\n offset = 100\n cmap_img = None\n spatial_data = adata.uns['spatial'][library_id]\n img = spatial_data['images'][img_key]\n scalef_key = f\"tissue_{img_key}_scalef\"\n\n # 0.5 needed for optimal matching with spot boundaries\n # checked with detected_tissue_image.png\n spot_size = (\n (\n spatial_data['scalefactors'][scalef_key]\n * spatial_data['scalefactors']['spot_diameter_fullres']\n )\n * 0.5\n * scale_spot\n )\n\n if crop_coord is not None:\n crop_coord = np.asarray(crop_coord)\n if len(crop_coord) != 4:\n raise ValueError(\"Invalid crop_coord of length {len(crop_coord)}(!=4)\")\n img_coord = (\n *crop_coord[:2],\n *np.ceil(img.shape[0] - crop_coord[2:4]).astype(int),\n )\n else:\n img_coord = [\n data_points[0][:, 0].min() - offset,\n data_points[0][:, 0].max() + offset,\n data_points[0][:, 1].min() - offset,\n data_points[0][:, 1].max() + offset,\n ]\n\n if bw:\n img = np.dot(img[..., :3], [0.2989, 0.5870, 0.1140])\n cmap_img = \"gray\"\n\n return img, img_coord, spot_size, cmap_img\n",
"path": "scanpy/plotting/_tools/scatterplots.py"
}
] | diff --git a/scanpy/plotting/_tools/scatterplots.py b/scanpy/plotting/_tools/scatterplots.py
index 51ed3870d3..fcaf94e827 100644
--- a/scanpy/plotting/_tools/scatterplots.py
+++ b/scanpy/plotting/_tools/scatterplots.py
@@ -778,7 +778,7 @@ def spatial(
(k for k in ['hires', 'lowres'] if k in spatial_data['images']), None,
)
- if img_key is None and size is None:
+ if size is None:
size = 1.0
return embedding(
diff --git a/scanpy/tests/_images/master_spatial_visium_default.png b/scanpy/tests/_images/master_spatial_visium_default.png
new file mode 100644
index 0000000000..b2f805b07a
Binary files /dev/null and b/scanpy/tests/_images/master_spatial_visium_default.png differ
diff --git a/scanpy/tests/test_plotting.py b/scanpy/tests/test_plotting.py
index 1b1b15091c..2e3c46de95 100644
--- a/scanpy/tests/test_plotting.py
+++ b/scanpy/tests/test_plotting.py
@@ -943,6 +943,16 @@ def test_visium_circles(image_comparer):
save_and_compare_images('master_spatial_visium')
+def test_visium_default(image_comparer):
+ save_and_compare_images = image_comparer(ROOT, FIGS, tol=15)
+ adata = sc.read_visium(HERE / '_data' / 'visium_data' / '1.0.0')
+ adata.obs = adata.obs.astype({'array_row': 'str'})
+
+ sc.pl.spatial(adata,)
+
+ save_and_compare_images('master_spatial_visium_default')
+
+
def test_visium_empty_img_key(image_comparer):
save_and_compare_images = image_comparer(ROOT, FIGS, tol=15)
adata = sc.read_visium(HERE / '_data' / 'visium_data' / '1.0.0')
|
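For the scanpy record above, the one-line change makes `size` default to 1.0 whenever the caller leaves it unset, not only when no tissue image key was found. The new regression test simply plots with defaults; a sketch of that usage (`HERE` is assumed to be the test suite's data directory):

```python
# Usage exercised by the regression test in the diff above; HERE is
# assumed to point at scanpy's test data directory.
import scanpy as sc

adata = sc.read_visium(HERE / '_data' / 'visium_data' / '1.0.0')
adata.obs = adata.obs.astype({'array_row': 'str'})
sc.pl.spatial(adata)  # no explicit size: now defaults to 1.0
```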
electricitymaps__electricitymaps-contrib-1223 | AR: Plant mappings missing
March 16th 2018, 12:14:50.317 | WARNING | AR | ALUATG08 is missing from the AR plant mapping!
March 16th 2018, 12:14:50.317 | WARNING | AR | ALUATG07 is missing from the AR plant mapping!
March 16th 2018, 12:14:50.316 | WARNING | AR | ALUATV01 is missing from the AR plant mapping!
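These warnings come from the parser's own check for CAMMESA plant codes that have no entry in its `power_plant_type` mapping. A minimal sketch of the additions the warnings call for, with the fuel types taken from the patch below (which also adds the sibling unit `ALUATG06`):

```python
# Sketch of the missing AR plant-mapping entries; the types ('gas') are
# the ones chosen in the fix below, not inferred here.
power_plant_type.update({
    'ALUATG06': 'gas',
    'ALUATG07': 'gas',
    'ALUATG08': 'gas',
    'ALUATV01': 'gas',
})
```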
| [
{
"content": "#!/usr/bin/env python3\n\nimport itertools\nimport re\nimport string\n\nimport arrow\nimport requests\nfrom bs4 import BeautifulSoup\n\ntry:\n unicode # Python 2\nexcept NameError:\n unicode = str # Python 3\n\n# This parser gets hourly electricity generation data from portalweb.cammesa.com/Memnet1/default.aspx\n# for Argentina. Currently wind and solar power are small contributors and not monitored but this is\n# likely to change in the future.\n\n# Useful links.\n# https://en.wikipedia.org/wiki/Electricity_sector_in_Argentina\n# https://en.wikipedia.org/wiki/List_of_power_stations_in_Argentina\n# http://globalenergyobservatory.org/countryid/10#\n# http://www.industcards.com/st-other-argentina.htm\n\n\n# Map of power plants to generation type.\n# http://portalweb.cammesa.com/memnet1/revistas/estacional/base_gen.html\n\npower_plant_type = {\n 'ABRODI01': 'gas',\n 'ACAJTG01': 'gas',\n 'ACAJTG02': 'gas',\n 'ACAJTG03': 'gas',\n 'ACAJTG04': 'gas',\n 'ACAJTG05': 'gas',\n 'ACAJTG06': 'gas',\n 'ACAJTV07': 'gas',\n 'ADTOHI': 'hydro',\n 'AESPTG01': 'gas',\n 'AESPTG02': 'gas',\n 'AESPTV01': 'gas',\n 'ALEMDI01': 'oil',\n 'ALICHI': 'hydro',\n 'ALOMDI01': 'gas',\n 'ALUMDI01': 'oil',\n 'AMEGHI': 'hydro',\n 'ANATDI01': 'gas',\n 'ANATDI02': 'gas',\n 'ANCHDI01': 'oil',\n 'ANCHDI02': 'oil',\n 'ANCHDI03': 'oil',\n 'ANCHDI04': 'oil',\n 'APARTV01': 'gas',\n 'ARA2EO': 'hydro',\n 'ARAUEO': 'hydro',\n 'ARGETG01': 'gas',\n 'ARISDI01': 'oil',\n 'ARMATG01': 'gas',\n 'ARMATG02': 'gas',\n 'ARMATG03': 'gas',\n 'ARREDI01': 'gas',\n 'ARROHI': 'hydro',\n 'ATUCNUCL': 'nuclear',\n 'ATU2NUCL': 'nuclear',\n 'AVALTG21': 'gas',\n 'AVALTG22': 'gas',\n 'AVALTG23': 'gas',\n 'AVALTV11': 'gas',\n 'AVALTV12': 'gas',\n 'BAMODI01': 'gas',\n 'BANDDI01': 'oil',\n 'BARDDI01': 'oil',\n 'BBLATV29': 'gas',\n 'BBLATV30': 'gas',\n 'BBLMDI01': 'oil',\n 'BBLMDI02': 'oil',\n 'BBLMDI03': 'oil',\n 'BBLMDI04': 'oil',\n 'BBLMDI05': 'oil',\n 'BBLMDI06': 'oil',\n 'BERIDI01': 'gas',\n 'BLOPTG01': 'gas',\n 'BRAGTG01': 'gas',\n 'BRAGTG02': 'gas',\n 'BRAGTG03': 'gas',\n 'BRAGTG04': 'gas',\n 'BRAGTG05': 'gas',\n 'BRAGTG06': 'gas',\n 'BRC1DI01': 'oil',\n 'BRCHTG01': 'gas',\n 'BROWTG01': 'gas',\n 'BROWTG02': 'gas',\n 'BSASTG01': 'gas',\n 'BSASTV01': 'gas',\n 'BVILDI01': 'oil',\n 'CACHDI01': 'gas',\n 'CACHHI': 'hydro',\n 'CADIHI': 'hydro',\n 'CAFADI01': 'gas',\n 'CAIMDI01': 'oil',\n 'CAIMDI02': 'oil',\n 'CAIMDI03': 'oil',\n 'CAIMDI04': 'oil',\n 'CAIMDI05': 'oil',\n 'CARLDI01': 'oil',\n 'CARRHI': 'hydro',\n 'CASSHI': 'hydro',\n 'CASTDI01': 'oil',\n 'CATADI01': 'oil',\n 'CATDDI01': 'oil',\n 'CAVIDI01': 'oil',\n 'CCOLHI': 'hydro',\n 'CCORHI': 'hydro',\n 'CEMODI01': 'gas',\n 'CEPUTG11': 'gas',\n 'CEPUTG12': 'gas',\n 'CEPUTV10': 'gas',\n 'CEREDI01': 'oil',\n 'CERITV01': 'gas',\n 'CESPHI': 'hydro',\n 'CGOMDI01': 'oil',\n 'CGOMDI02': 'oil',\n 'CGOMDI03': 'oil',\n 'CGOMDI04': 'oil',\n 'CHARDI01': 'oil',\n 'CHARDI02': 'oil',\n 'CHEPDI01': 'oil',\n 'CHILDI01': 'oil',\n 'CHLEDI01': 'oil',\n 'CHOCHI': 'hydro',\n 'CIPODI01': 'oil',\n 'CIPOHI': 'hydro',\n 'COLBDI01': 'oil',\n 'COMODI01': 'gas',\n 'CONDHI': 'hydro',\n 'COROHI': 'hydro',\n 'CORRDI01': 'gas',\n 'COSMDI11': 'oil',\n 'COSTTG08': 'gas',\n 'COSTTG09': 'gas',\n 'COSTTV01': 'gas',\n 'COSTTV02': 'gas',\n 'COSTTV03': 'gas',\n 'COSTTV04': 'gas',\n 'COSTTV06': 'gas',\n 'COSTTV07': 'gas',\n 'COSTTV10': 'gas',\n 'CPIEHI': 'hydro',\n 'CSARDI01': 'oil',\n 'CUMODI01': 'gas',\n 'CURUTG01': 'gas',\n 'CURUTG02': 'gas',\n 'DFUNDI01': 'oil',\n 'DFUNTG02': 'gas',\n 'DIADEO': 'hydro',\n 'DIQUTG02': 'gas',\n 'DIQUTG03': 
'gas',\n 'DSUDTG07': 'gas',\n 'DSUDTG08': 'gas',\n 'DSUDTG09': 'gas',\n 'DSUDTG10': 'gas',\n 'DSUDTV11': 'gas',\n 'EBARTG01': 'gas',\n 'EBARTG02': 'gas',\n 'ELOMDI01': 'gas',\n 'ENSETG01': 'gas',\n 'EMBANUCL': 'nuclear',\n 'ESCAHI': 'hydro',\n 'ESQDDI01': 'oil',\n 'EZEITG01': 'gas',\n 'EZEITG02': 'gas',\n 'EZEITG03': 'gas',\n 'FORDDI01': 'oil',\n 'FORDDI02': 'oil',\n 'FRIATG01': 'gas',\n 'FSIMHI': 'hydro',\n 'FUTAHI': 'hydro',\n 'GBELTG01': 'gas',\n 'GBELTG02': 'gas',\n 'GBELTV01': 'gas',\n 'GBMODI01': 'gas',\n 'GEBATG01': 'gas',\n 'GEBATG02': 'gas',\n 'GEBATG03': 'gas',\n 'GEBATV01': 'gas',\n 'GOYDDI01': 'oil',\n 'GUEMTG01': 'gas',\n 'GUEMTV11': 'gas',\n 'GUEMTV12': 'gas',\n 'GUEMTV13': 'gas',\n 'HON1FV': 'hydro',\n 'HRENDI01': 'oil',\n 'HUMADI01': 'oil',\n 'HUEMDI01': 'gas',\n 'INDETG01': 'gas',\n 'INDETG02': 'gas',\n 'INDETG03': 'gas',\n 'INTADI01': 'oil',\n 'ISBATV01': 'gas',\n 'ISVEDI01': 'oil',\n 'ITATDI01': 'oil',\n 'JUARDI01': 'oil',\n 'JUNIDI01': 'oil',\n 'LBANTG21': 'gas',\n 'LBANTG22': 'gas',\n 'LBLADI01': 'oil',\n 'LCA2TG01': 'gas',\n 'LCAMTG01': 'gas',\n 'LDCUHI': 'hydro',\n 'LDCUTG22': 'gas',\n 'LDCUTG23': 'gas',\n 'LDCUTG24': 'gas',\n 'LDCUTG25': 'gas',\n 'LDCUTV11': 'gas',\n 'LDCUTV12': 'gas',\n 'LDCUTV14': 'gas',\n 'LDCUTV15': 'gas',\n 'LDLADI01': 'oil',\n 'LDLATG01': 'gas',\n 'LDLATG02': 'gas',\n 'LDLATG03': 'gas',\n 'LDLATG04': 'gas',\n 'LDLATG05': 'gas',\n 'LDLATV01': 'gas',\n 'LEDETV01': 'biomass',\n 'LEVADI01': 'oil',\n 'LEVATG01': 'gas',\n 'LEVATG02': 'gas',\n 'LIBEDI01': 'oil',\n 'LINCDI01': 'oil',\n 'LMADHI': 'hydro',\n 'LMO1HI': 'hydro',\n 'LMO2HI': 'hydro',\n 'LOM1EO': 'hydro',\n 'LOBODI01': 'oil',\n 'LPALDI01': 'oil',\n 'LPAZDI01': 'oil',\n 'LPLADI01': 'oil',\n 'LQUIHI': 'hydro',\n 'LREYHB': 'hydro',\n 'LRIDDI01': 'oil',\n 'LRIODI': 'oil',\n 'LRIOTG21': 'gas',\n 'LRIOTG22': 'gas',\n 'LRIOTG23': 'gas',\n 'LRIOTG24': 'gas',\n 'LRIPDI01': 'oil',\n 'LRISDI01': 'oil',\n 'LROBDI01': 'oil',\n 'LVARDI01': 'oil',\n 'LVINHI': 'hydro',\n 'MAGDDI01': 'oil',\n 'MATETG01': 'gas',\n 'MATETG02': 'gas',\n 'MATETG03': 'gas',\n 'MATETG04': 'gas',\n 'MATETG05': 'gas',\n 'MATETG06': 'gas',\n 'MATETG07': 'gas',\n 'MATETG08': 'gas',\n 'MATETG09': 'gas',\n 'MATETG10': 'gas',\n 'MATHTG01': 'gas',\n 'MATHTG02': 'gas',\n 'MDAJTG15': 'oil',\n 'MDAJTG17': 'oil',\n 'MDPATG12': 'gas',\n 'MDPATG13': 'gas',\n 'MDPATG19': 'gas',\n 'MDPATG20': 'gas',\n 'MDPATG21': 'gas',\n 'MDPATG22': 'gas',\n 'MDPATG23': 'gas',\n 'MDPATG24': 'gas',\n 'MDPATV07': 'gas',\n 'MDPATV08': 'gas',\n 'MESEDI01': 'oil',\n 'MIR1DI01': 'oil',\n 'MJUADI01': 'oil',\n 'MMARTG01': 'gas',\n 'MMARTG02': 'gas',\n 'MMARTG03': 'gas',\n 'MMARTG04': 'gas',\n 'MMARTG05': 'gas',\n 'MMARTG06': 'gas',\n 'MMARTG07': 'gas',\n 'MSEVTG01': 'gas',\n 'NECOEO': 'hydro',\n 'NECOTV01': 'gas',\n 'NECOTV02': 'gas',\n 'NECOTV03': 'gas',\n 'NECOTV04': 'gas',\n 'NESPDI02': 'oil',\n 'NIH1HI': 'hydro',\n 'NIH4HI': 'hydro',\n 'NOMODI01': 'gas',\n 'NPOMDI01': 'gas',\n 'NPUETV05': 'gas',\n 'NPUETV06': 'gas',\n 'OBERTG01': 'gas',\n 'OCAMDI01': 'oil',\n 'OCAMDI02': 'oil',\n 'OCAMDI03': 'oil',\n 'OCAMDI04': 'oil',\n 'OCAMDI05': 'oil',\n 'OLADTG01': 'gas',\n 'OLADTG02': 'gas',\n 'OLPADI01': 'oil',\n 'ORADDI01': 'oil',\n 'PAGUHI': 'hydro',\n 'PAMODI01': 'oil',\n 'PARATG01': 'gas',\n 'PARATG02': 'gas',\n 'PATATG01': 'gas',\n 'PATATG02': 'gas',\n 'PATATV01': 'gas',\n 'PBANHI': 'hydro',\n 'PEHUDI01': 'oil',\n 'PERZDI01': 'oil',\n 'PERZDI02': 'oil',\n 'PERZDI03': 'oil',\n 'PERZDI04': 'oil',\n 'PERZDI05': 'oil',\n 'PERZDI06': 'oil',\n 'PERZDI07': 'oil',\n 'PERZDI08': 
'oil',\n 'PESPTV01': 'gas',\n 'PHDZTG01': 'gas',\n 'PHUITG01': 'gas',\n 'PICADI01': 'oil',\n 'PILBDI01': 'oil',\n 'PILBDI02': 'oil',\n 'PILBDI03': 'oil',\n 'PILBDI04': 'oil',\n 'PILBDI05': 'oil',\n 'PILBDI06': 'oil',\n 'PILATG11': 'gas',\n 'PILATG12': 'gas',\n 'PILATV01': 'gas',\n 'PILATV02': 'gas',\n 'PILATV03': 'gas',\n 'PILATV04': 'gas',\n 'PILATV10': 'gas',\n 'PINATG07': 'gas',\n 'PINATG08': 'gas',\n 'PINATG09': 'gas',\n 'PINATG10': 'gas',\n 'PIQIDI01': 'oil',\n 'PIRADI01': 'oil',\n 'PMORHI': 'hydro',\n 'PNEGHI': 'hydro',\n 'PNUETV07': 'gas',\n 'PNUETV08': 'gas',\n 'PNUETV09': 'gas',\n 'POSAIN': 'hydro',\n 'PPATDI01': 'oil',\n 'PPLEHI': 'hydro',\n 'PPNOTG01': 'gas',\n 'PPNOTG02': 'gas',\n 'PROCDI01': 'oil',\n 'PROVTV01': 'gas',\n 'PTR1TG23': 'gas',\n 'PTR1TG24': 'gas',\n 'PTR1TG25': 'gas',\n 'PUPITV01': 'gas',\n 'PVIEHI': 'hydro',\n 'PZUEDI01': 'oil',\n 'QULLHI': 'hydro',\n 'RAFADI01': 'oil',\n 'RAW1EO': 'hydro',\n 'RAW2EO': 'hydro',\n 'RCEPDI01': 'oil',\n 'RCUATG02': 'gas',\n 'REALDI01': 'oil',\n 'REOLHI': 'hydro',\n 'RESCDI01': 'oil',\n 'RGDEHB': 'hydro',\n 'RHONHI': 'hydro',\n 'RICADI01': 'oil',\n 'ROCATG01': 'gas',\n 'ROJOTG01': 'gas',\n 'ROJOTG02': 'gas',\n 'ROJOTG03': 'gas',\n 'ROMEHI': 'hydro',\n 'RREYHI': 'hydro',\n 'RSAUDI01': 'oil',\n 'RTERTG01': 'gas',\n 'RTERTG02': 'gas',\n 'RUFIDI01': 'oil',\n 'SALOHI': 'hydro',\n 'SANADI01': 'oil',\n 'SANDHI': 'hydro',\n 'SARCTG21': 'gas',\n 'SARCTG22': 'gas',\n 'SARCTG23': 'gas',\n 'SCHADI01': 'oil',\n 'SCTPDI01': 'oil',\n 'SERTTG01': 'gas',\n 'SFR2DI01': 'oil',\n 'SFRATG01': 'gas',\n 'SFRATG02': 'gas',\n 'SGDEHIAR': 'hydro',\n 'SGUIHI': 'hydro',\n 'SHELTG01': 'gas',\n 'SJUAFV': 'hydro',\n 'SLTODI01': 'oil',\n 'SMANDI01': 'oil',\n 'SMARDI01': 'oil',\n 'SMIGDI01': 'oil',\n 'SMTUTG01': 'gas',\n 'SMTUTG02': 'gas',\n 'SMTUTV01': 'gas',\n 'SNICTV11': 'coal',\n 'SNICTV12': 'coal',\n 'SNICTV13': 'coal',\n 'SNICTV14': 'coal',\n 'SNICTV15': 'coal',\n 'SOESTG03': 'gas',\n 'SOLATG01': 'gas',\n 'SORRTV13': 'gas',\n 'SPE2DI01': 'oil',\n 'SPENDI01': 'oil',\n 'SPEVDI01': 'oil',\n 'SROQHI': 'hydro',\n 'SROSDI01': 'oil',\n 'SSALDI01': 'oil',\n 'SVICDI01': 'oil',\n 'TABATV01': 'gas',\n 'TANDTG01': 'gas',\n 'TANDTG02': 'gas',\n 'TANDTV01': 'gas',\n 'TARDDI01': 'oil',\n 'TELLDI01': 'oil',\n 'TERVDI01': 'oil',\n 'TIMBTG01': 'gas',\n 'TIMBTG02': 'gas',\n 'TIMBTV01': 'gas',\n 'TINODI01': 'oil',\n 'TORDEO': 'hydro',\n 'TUCUTG01': 'gas',\n 'TUCUTG02': 'gas',\n 'TUCUTV01': 'gas',\n 'TUNAHI': 'hydro',\n 'ULLUHI': 'hydro',\n 'VANGDI01': 'oil',\n 'VGADDI01': 'oil',\n 'VGEPDI01': 'oil',\n 'VGESTG11': 'gas',\n 'VGESTG14': 'gas',\n 'VGESTG16': 'gas',\n 'VGESTG18': 'gas',\n 'VIALDI01': 'oil',\n 'VMA2TG01': 'gas',\n 'VMA2TG02': 'gas',\n 'VMA2TG03': 'gas',\n 'VMA2TG04': 'gas',\n 'VMARTG01': 'gas',\n 'VMARTG02': 'gas',\n 'VMARTG03': 'gas',\n 'VOBLTG01': 'gas',\n 'VOBLTG02': 'gas',\n 'VOBLTV01': 'gas',\n 'VTUDDI01': 'oil',\n 'VTUEDI01': 'oil',\n 'YACYHI': 'hydro',\n 'YANQDI01': 'oil',\n 'YPFATG01': 'gas',\n 'ZAPATG01': 'gas',\n 'ZAPATG02': 'gas',\n 'ZAPATG03': 'gas',\n 'ZAPATG04': 'gas'\n}\n\n# URL's for thermal and hydro pages and data sources respectively.\n\nurl = ('http://portalweb.cammesa.com/MEMNet1/Pages/Informes%20por'\n '%20Categor%C3%ADa/Operativos/VisorReporteSinComDesp_minimal.aspx'\n '?hora=0&titulo=Despacho%20Generacion%20Termica&reportPath='\n 'http://lauzet:5000/MemNet1/ReportingServices/DespachoGeneracion'\n 'Termica.rdl--0--Despacho+Generaci%c3%b3n+T%c3%a9rmica')\n\nturl = ('http://portalweb.cammesa.com/Reserved.ReportViewerWebControl.'\n 
'axd?Culture=3082&UICulture=3082&ReportStack=1'\n '&OpType=ReportArea&Controller=ClientController'\n 'ctl00_ctl04_g_a581304b_aafc_4818_a4a1_e96f27a22246_ctl00_RepViewer'\n '&ZoomMode=Percent&ZoomPct=100&ReloadDocMap='\n 'true&SearchStartPage=0&LinkTarget=_top')\n\nhurl = ('http://portalweb.cammesa.com/memnet1/Pages/Informes%20por%20Categor'\n '%C3%ADa/Operativos/VisorReportesSinCom_minimal.aspx?hora=0&'\n 'titulo=Despacho%20Generacion%20Hidraulica&reportPath='\n 'http://lauzet:5000/MemNet1/ReportingServices/'\n 'DespachoGeneracionHidraulica.rdl--0--Despacho+Generaci%c3%b3n+Zona+'\n 'Hidr%c3%a1ulica')\n\nthurl = ('http://portalweb.cammesa.com/Reserved.ReportViewerWebControl.'\n 'axd?Culture=3082&UICulture=3082&ReportStack=1'\n '&OpType=ReportArea&Controller=ClientController'\n 'ctl00_ctl04_g_966166c3_db78_453e_9a34_83d2bb263ee4_''ctl00_RepViewer'\n '&ZoomMode=Percent&ZoomPct=100&ReloadDocMap='\n 'true&SearchStartPage=0&LinkTarget=_top')\n\ncammesa_url = 'http://portalweb.cammesa.com/default.aspx'\n\n\ndef webparser(req):\n \"\"\"Takes content from webpage and returns all text as a list of strings\"\"\"\n\n soup = BeautifulSoup(req.content, 'html.parser')\n figs = soup.find_all(\"div\", class_=\"r11\")\n data_table = [unicode(tag.get_text()) for tag in figs]\n\n return data_table\n\n\ndef fetch_price(zone_key='AR', session=None, target_datetime=None, logger=None):\n \"\"\"\n Requests the last known power price of a given country\n Arguments:\n zone_key (optional) -- used in case a parser is able to fetch multiple countries\n session (optional) -- request session passed in order to re-use an existing session\n Return:\n A dictionary in the form:\n {\n 'zoneKey': 'FR',\n 'currency': EUR,\n 'datetime': '2017-01-01T00:00:00Z',\n 'price': 0.0,\n 'source': 'mysource.com'\n }\n \"\"\"\n if target_datetime:\n raise NotImplementedError('This parser is not yet able to parse past dates')\n s = session or requests.Session()\n price_req = s.get(cammesa_url)\n psoup = BeautifulSoup(price_req.content, 'html.parser')\n find_price = psoup.find('td', class_=\"cssFuncionesLeft\", align=\"left\")\n\n try:\n price_text = find_price.getText()\n\n # Strip all whitespace and isolate number. Convert to float.\n price_nws = \"\".join(price_text.split())\n lprice = price_nws.rpartition(':')[2]\n rprice = lprice.split('[')[0]\n price = float(rprice.replace(',', '.'))\n\n except (AttributeError, ValueError):\n # Price element not present or no price stated.\n price = None\n\n datetime = arrow.now('UTC-3').floor('hour').datetime\n\n data = {\n 'zoneKey': zone_key,\n 'currency': 'ARS',\n 'datetime': datetime,\n 'price': price,\n 'source': 'portalweb.cammesa.com'\n }\n\n return data\n\n\ndef get_datetime(session=None):\n \"\"\"\n Generation data is updated hourly. Makes request then finds most recent hour available.\n Returns an arrow datetime object using UTC-3 for timezone and zero for minutes and seconds.\n \"\"\"\n\n # Argentina does not currently observe daylight savings time. 
This may change from year to year!\n # https://en.wikipedia.org/wiki/Time_in_Argentina\n s = session or requests.Session()\n rt = s.get(url)\n timesoup = BeautifulSoup(rt.content, 'html.parser')\n find_hour = timesoup.find(\"option\", selected=\"selected\", value=\"1\").getText()\n at = arrow.now('UTC-3').floor('hour')\n datetime = (at.replace(hour=int(find_hour), minute=0, second=0)).datetime\n\n return {'datetime': datetime}\n\n\ndef dataformat(junk):\n \"\"\"Takes string data with only digits and returns it as a float.\"\"\"\n\n formatted = []\n for item in junk:\n if not any(char in item for char in string.ascii_letters):\n item = float(item.replace(',', '.'))\n formatted.append(item)\n\n return formatted\n\n\ndef get_thermal(session, logger):\n \"\"\"\n Requests thermal generation data then parses and sorts by type. Nuclear is included.\n Returns a dictionary.\n \"\"\"\n\n # Need to persist session in order to get ControlID and ReportSession so we can send second request\n # for table data. Both these variables change on each new request.\n s = session or requests.Session()\n r = s.get(url)\n pat = re.search(\"ControlID=[^&]*\", r.text).group()\n spat = re.search(\"ReportSession=[^&]*\", r.text).group()\n cid = pat.rpartition('=')[2]\n rs = spat.rpartition('=')[2]\n full_table = []\n\n # 'En Reserva' plants are not generating and can be ignored.\n # The table has an extra column on 'Costo Operativo' page which must be removed to find power generated correctly.\n\n pagenumber = 1\n reserves = False\n\n while not reserves:\n t = s.get(turl, params={'ControlID': cid, 'ReportSession': rs,\n 'PageNumber': '{}'.format(pagenumber)})\n text_only = webparser(t)\n if 'Estado' in text_only:\n for item in text_only:\n if len(item) == 1 and item in string.ascii_letters:\n text_only.remove(item)\n if 'En Reserva' in text_only:\n reserves = True\n continue\n full_table.append(text_only)\n pagenumber += 1\n\n data = list(itertools.chain.from_iterable(full_table))\n formatted_data = dataformat(data)\n mapped_data = [power_plant_type.get(x, x) for x in formatted_data]\n\n for item in mapped_data:\n try:\n # avoids including titles and headings\n if all((item.isupper(), not item.isalpha(), ' ' not in item)):\n logger.warning(\n '{} is missing from the AR plant mapping!'.format(item),\n extra={'key': 'AR'})\n except AttributeError:\n # not a string....\n continue\n\n find_totals = [i + 1 for i, x in enumerate(mapped_data) if x == 'Totales ']\n thermal_generation = sum([mapped_data[i] for i in find_totals])\n\n find_nuclear = [i + 2 for i, x in enumerate(mapped_data) if x == 'nuclear']\n nuclear_generation = sum([mapped_data[i] for i in find_nuclear])\n find_oil = [i + 2 for i, x in enumerate(mapped_data) if x == 'oil']\n oil_generation = sum([mapped_data[i] for i in find_oil])\n find_coal = [i + 2 for i, x in enumerate(mapped_data) if x == 'coal']\n coal_generation = sum([mapped_data[i] for i in find_coal])\n find_biomass = [i + 2 for i, x in enumerate(mapped_data) if x == 'biomass']\n biomass_generation = sum([mapped_data[i] for i in find_biomass])\n find_gas = [i + 2 for i, x in enumerate(mapped_data) if x == 'gas']\n gas_generation = sum([mapped_data[i] for i in find_gas])\n\n unknown_generation = (thermal_generation - nuclear_generation - gas_generation\n - oil_generation - coal_generation - biomass_generation)\n\n if unknown_generation < 0.0:\n unknown_generation = 0.0\n\n return {'gas': gas_generation,\n 'nuclear': nuclear_generation,\n 'coal': coal_generation,\n 'unknown': unknown_generation,\n 
'oil': oil_generation,\n 'biomass': biomass_generation\n }\n\n\ndef get_hydro(session=None):\n \"\"\"Requests hydro generation data then parses, returns a dictionary.\"\"\"\n\n s = session or requests.Session()\n r = s.get(hurl)\n pat = re.search(\"ControlID=[^&]*\", r.text).group()\n spat = re.search(\"ReportSession=[^&]*\", r.text).group()\n cid = pat.rpartition('=')[2]\n rs = spat.rpartition('=')[2]\n full_table = []\n\n pagenumber = 1\n reserves = False\n\n while not reserves:\n t = s.get(thurl, params={'ControlID': cid, 'ReportSession': rs,\n 'PageNumber': '{}'.format(pagenumber)})\n text_only = webparser(t)\n if 'En Reserva' in text_only:\n reserves = True\n continue\n full_table.append(text_only)\n pagenumber += 1\n\n data = list(itertools.chain.from_iterable(full_table))\n formatted_data = dataformat(data)\n find_hydro = [i + 1 for i, x in enumerate(formatted_data) if x == 'Totales ']\n total_hydro_generation = sum([formatted_data[i] for i in find_hydro])\n\n return {'hydro': total_hydro_generation}\n\n\ndef fetch_production(zone_key='AR', session=None, target_datetime=None, logger=None):\n \"\"\"\n Requests the last known production mix (in MW) of a given country\n Arguments:\n zone_key (optional) -- used in case a parser is able to fetch multiple countries\n target_datetime: if we want to parser for a specific time and not latest\n logger: where to log useful information\n Return:\n A dictionary in the form:\n {\n 'zoneKey': 'FR',\n 'datetime': '2017-01-01T00:00:00Z',\n 'production': {\n 'biomass': 0.0,\n 'coal': 0.0,\n 'gas': 0.0,\n 'hydro': 0.0,\n 'nuclear': null,\n 'oil': 0.0,\n 'solar': 0.0,\n 'wind': 0.0,\n 'geothermal': 0.0,\n 'unknown': 0.0\n },\n 'storage': {\n 'hydro': -10.0,\n },\n 'source': 'mysource.com'\n }\n \"\"\"\n if target_datetime is not None:\n raise NotImplementedError('This parser is not yet able to parse past dates')\n\n gdt = get_datetime(session=None)\n thermal = get_thermal(session, logger)\n hydro = get_hydro(session=None)\n production_mix = {\n 'zoneKey': zone_key,\n 'datetime': gdt['datetime'],\n 'production': {\n 'biomass': thermal.get('biomass', 0.0),\n 'coal': thermal.get('coal', 0.0),\n 'gas': thermal.get('gas', 0.0),\n 'hydro': hydro.get('hydro', 0.0),\n 'nuclear': thermal.get('nuclear', 0.0),\n 'oil': thermal.get('oil', 0.0),\n 'solar': None,\n 'wind': None,\n 'geothermal': 0.0,\n 'unknown': thermal.get('unknown', 0.0)\n },\n 'storage': {\n 'hydro': None,\n },\n 'source': 'portalweb.cammesa.com'\n }\n\n return production_mix\n\n\nif __name__ == '__main__':\n \"\"\"Main method, never used by the Electricity Map backend, but handy for testing.\"\"\"\n\n print('fetch_production() ->')\n print(fetch_production())\n print('fetch_price() ->')\n print(fetch_price())\n",
"path": "parsers/AR.py"
}
] | [
{
"content": "#!/usr/bin/env python3\n\nimport itertools\nimport re\nimport string\n\nimport arrow\nimport requests\nfrom bs4 import BeautifulSoup\n\ntry:\n unicode # Python 2\nexcept NameError:\n unicode = str # Python 3\n\n# This parser gets hourly electricity generation data from portalweb.cammesa.com/Memnet1/default.aspx\n# for Argentina. Currently wind and solar power are small contributors and not monitored but this is\n# likely to change in the future.\n\n# Useful links.\n# https://en.wikipedia.org/wiki/Electricity_sector_in_Argentina\n# https://en.wikipedia.org/wiki/List_of_power_stations_in_Argentina\n# http://globalenergyobservatory.org/countryid/10#\n# http://www.industcards.com/st-other-argentina.htm\n\n\n# Map of power plants to generation type.\n# http://portalweb.cammesa.com/memnet1/revistas/estacional/base_gen.html\n\npower_plant_type = {\n 'ABRODI01': 'gas',\n 'ACAJTG01': 'gas',\n 'ACAJTG02': 'gas',\n 'ACAJTG03': 'gas',\n 'ACAJTG04': 'gas',\n 'ACAJTG05': 'gas',\n 'ACAJTG06': 'gas',\n 'ACAJTV07': 'gas',\n 'ADTOHI': 'hydro',\n 'AESPTG01': 'gas',\n 'AESPTG02': 'gas',\n 'AESPTV01': 'gas',\n 'ALEMDI01': 'oil',\n 'ALICHI': 'hydro',\n 'ALOMDI01': 'gas',\n 'ALUATG06': 'gas',\n 'ALUATG07': 'gas',\n 'ALUATG08': 'gas',\n 'ALUATV01': 'gas',\n 'ALUMDI01': 'oil',\n 'AMEGHI': 'hydro',\n 'ANATDI01': 'gas',\n 'ANATDI02': 'gas',\n 'ANCHDI01': 'oil',\n 'ANCHDI02': 'oil',\n 'ANCHDI03': 'oil',\n 'ANCHDI04': 'oil',\n 'APARTV01': 'gas',\n 'ARA2EO': 'hydro',\n 'ARAUEO': 'hydro',\n 'ARGETG01': 'gas',\n 'ARISDI01': 'oil',\n 'ARMATG01': 'gas',\n 'ARMATG02': 'gas',\n 'ARMATG03': 'gas',\n 'ARREDI01': 'gas',\n 'ARROHI': 'hydro',\n 'ATUCNUCL': 'nuclear',\n 'ATU2NUCL': 'nuclear',\n 'AVALTG21': 'gas',\n 'AVALTG22': 'gas',\n 'AVALTG23': 'gas',\n 'AVALTV11': 'gas',\n 'AVALTV12': 'gas',\n 'BAMODI01': 'gas',\n 'BANDDI01': 'oil',\n 'BARDDI01': 'oil',\n 'BBLATV29': 'gas',\n 'BBLATV30': 'gas',\n 'BBLMDI01': 'oil',\n 'BBLMDI02': 'oil',\n 'BBLMDI03': 'oil',\n 'BBLMDI04': 'oil',\n 'BBLMDI05': 'oil',\n 'BBLMDI06': 'oil',\n 'BERIDI01': 'gas',\n 'BLOPTG01': 'gas',\n 'BRAGTG01': 'gas',\n 'BRAGTG02': 'gas',\n 'BRAGTG03': 'gas',\n 'BRAGTG04': 'gas',\n 'BRAGTG05': 'gas',\n 'BRAGTG06': 'gas',\n 'BRC1DI01': 'oil',\n 'BRCHTG01': 'gas',\n 'BROWTG01': 'gas',\n 'BROWTG02': 'gas',\n 'BSASTG01': 'gas',\n 'BSASTV01': 'gas',\n 'BVILDI01': 'oil',\n 'CACHDI01': 'gas',\n 'CACHHI': 'hydro',\n 'CADIHI': 'hydro',\n 'CAFADI01': 'gas',\n 'CAIMDI01': 'oil',\n 'CAIMDI02': 'oil',\n 'CAIMDI03': 'oil',\n 'CAIMDI04': 'oil',\n 'CAIMDI05': 'oil',\n 'CARLDI01': 'oil',\n 'CARRHI': 'hydro',\n 'CASSHI': 'hydro',\n 'CASTDI01': 'oil',\n 'CATADI01': 'oil',\n 'CATDDI01': 'oil',\n 'CAVIDI01': 'oil',\n 'CCOLHI': 'hydro',\n 'CCORHI': 'hydro',\n 'CEMODI01': 'gas',\n 'CEPUTG11': 'gas',\n 'CEPUTG12': 'gas',\n 'CEPUTV10': 'gas',\n 'CEREDI01': 'oil',\n 'CERITV01': 'gas',\n 'CESPHI': 'hydro',\n 'CGOMDI01': 'oil',\n 'CGOMDI02': 'oil',\n 'CGOMDI03': 'oil',\n 'CGOMDI04': 'oil',\n 'CHARDI01': 'oil',\n 'CHARDI02': 'oil',\n 'CHEPDI01': 'oil',\n 'CHILDI01': 'oil',\n 'CHLEDI01': 'oil',\n 'CHOCHI': 'hydro',\n 'CIPODI01': 'oil',\n 'CIPOHI': 'hydro',\n 'COLBDI01': 'oil',\n 'COMODI01': 'gas',\n 'CONDHI': 'hydro',\n 'COROHI': 'hydro',\n 'CORRDI01': 'gas',\n 'COSMDI11': 'oil',\n 'COSTTG08': 'gas',\n 'COSTTG09': 'gas',\n 'COSTTV01': 'gas',\n 'COSTTV02': 'gas',\n 'COSTTV03': 'gas',\n 'COSTTV04': 'gas',\n 'COSTTV06': 'gas',\n 'COSTTV07': 'gas',\n 'COSTTV10': 'gas',\n 'CPIEHI': 'hydro',\n 'CSARDI01': 'oil',\n 'CUMODI01': 'gas',\n 'CURUTG01': 'gas',\n 'CURUTG02': 'gas',\n 'DFUNDI01': 
'oil',\n 'DFUNTG02': 'gas',\n 'DIADEO': 'hydro',\n 'DIQUTG02': 'gas',\n 'DIQUTG03': 'gas',\n 'DSUDTG07': 'gas',\n 'DSUDTG08': 'gas',\n 'DSUDTG09': 'gas',\n 'DSUDTG10': 'gas',\n 'DSUDTV11': 'gas',\n 'EBARTG01': 'gas',\n 'EBARTG02': 'gas',\n 'ELOMDI01': 'gas',\n 'ENSETG01': 'gas',\n 'EMBANUCL': 'nuclear',\n 'ESCAHI': 'hydro',\n 'ESQDDI01': 'oil',\n 'EZEITG01': 'gas',\n 'EZEITG02': 'gas',\n 'EZEITG03': 'gas',\n 'FORDDI01': 'oil',\n 'FORDDI02': 'oil',\n 'FRIATG01': 'gas',\n 'FSIMHI': 'hydro',\n 'FUTAHI': 'hydro',\n 'GBELTG01': 'gas',\n 'GBELTG02': 'gas',\n 'GBELTV01': 'gas',\n 'GBMODI01': 'gas',\n 'GEBATG01': 'gas',\n 'GEBATG02': 'gas',\n 'GEBATG03': 'gas',\n 'GEBATV01': 'gas',\n 'GOYDDI01': 'oil',\n 'GUEMTG01': 'gas',\n 'GUEMTV11': 'gas',\n 'GUEMTV12': 'gas',\n 'GUEMTV13': 'gas',\n 'HON1FV': 'hydro',\n 'HRENDI01': 'oil',\n 'HUMADI01': 'oil',\n 'HUEMDI01': 'gas',\n 'INDETG01': 'gas',\n 'INDETG02': 'gas',\n 'INDETG03': 'gas',\n 'INTADI01': 'oil',\n 'ISBATV01': 'gas',\n 'ISVEDI01': 'oil',\n 'ITATDI01': 'oil',\n 'JUARDI01': 'oil',\n 'JUNIDI01': 'oil',\n 'LBANTG21': 'gas',\n 'LBANTG22': 'gas',\n 'LBLADI01': 'oil',\n 'LCA2TG01': 'gas',\n 'LCAMTG01': 'gas',\n 'LDCUHI': 'hydro',\n 'LDCUTG22': 'gas',\n 'LDCUTG23': 'gas',\n 'LDCUTG24': 'gas',\n 'LDCUTG25': 'gas',\n 'LDCUTV11': 'gas',\n 'LDCUTV12': 'gas',\n 'LDCUTV14': 'gas',\n 'LDCUTV15': 'gas',\n 'LDLADI01': 'oil',\n 'LDLATG01': 'gas',\n 'LDLATG02': 'gas',\n 'LDLATG03': 'gas',\n 'LDLATG04': 'gas',\n 'LDLATG05': 'gas',\n 'LDLATV01': 'gas',\n 'LEDETV01': 'biomass',\n 'LEVADI01': 'oil',\n 'LEVATG01': 'gas',\n 'LEVATG02': 'gas',\n 'LIBEDI01': 'oil',\n 'LINCDI01': 'oil',\n 'LMADHI': 'hydro',\n 'LMO1HI': 'hydro',\n 'LMO2HI': 'hydro',\n 'LOM1EO': 'hydro',\n 'LOBODI01': 'oil',\n 'LPALDI01': 'oil',\n 'LPAZDI01': 'oil',\n 'LPLADI01': 'oil',\n 'LQUIHI': 'hydro',\n 'LREYHB': 'hydro',\n 'LRIDDI01': 'oil',\n 'LRIODI': 'oil',\n 'LRIOTG21': 'gas',\n 'LRIOTG22': 'gas',\n 'LRIOTG23': 'gas',\n 'LRIOTG24': 'gas',\n 'LRIPDI01': 'oil',\n 'LRISDI01': 'oil',\n 'LROBDI01': 'oil',\n 'LVARDI01': 'oil',\n 'LVINHI': 'hydro',\n 'MAGDDI01': 'oil',\n 'MATETG01': 'gas',\n 'MATETG02': 'gas',\n 'MATETG03': 'gas',\n 'MATETG04': 'gas',\n 'MATETG05': 'gas',\n 'MATETG06': 'gas',\n 'MATETG07': 'gas',\n 'MATETG08': 'gas',\n 'MATETG09': 'gas',\n 'MATETG10': 'gas',\n 'MATHTG01': 'gas',\n 'MATHTG02': 'gas',\n 'MDAJTG15': 'oil',\n 'MDAJTG17': 'oil',\n 'MDPATG12': 'gas',\n 'MDPATG13': 'gas',\n 'MDPATG19': 'gas',\n 'MDPATG20': 'gas',\n 'MDPATG21': 'gas',\n 'MDPATG22': 'gas',\n 'MDPATG23': 'gas',\n 'MDPATG24': 'gas',\n 'MDPATV07': 'gas',\n 'MDPATV08': 'gas',\n 'MESEDI01': 'oil',\n 'MIR1DI01': 'oil',\n 'MJUADI01': 'oil',\n 'MMARTG01': 'gas',\n 'MMARTG02': 'gas',\n 'MMARTG03': 'gas',\n 'MMARTG04': 'gas',\n 'MMARTG05': 'gas',\n 'MMARTG06': 'gas',\n 'MMARTG07': 'gas',\n 'MSEVTG01': 'gas',\n 'NECOEO': 'hydro',\n 'NECOTV01': 'gas',\n 'NECOTV02': 'gas',\n 'NECOTV03': 'gas',\n 'NECOTV04': 'gas',\n 'NESPDI02': 'oil',\n 'NIH1HI': 'hydro',\n 'NIH4HI': 'hydro',\n 'NOMODI01': 'gas',\n 'NPOMDI01': 'gas',\n 'NPUETV05': 'gas',\n 'NPUETV06': 'gas',\n 'OBERTG01': 'gas',\n 'OCAMDI01': 'oil',\n 'OCAMDI02': 'oil',\n 'OCAMDI03': 'oil',\n 'OCAMDI04': 'oil',\n 'OCAMDI05': 'oil',\n 'OLADTG01': 'gas',\n 'OLADTG02': 'gas',\n 'OLPADI01': 'oil',\n 'ORADDI01': 'oil',\n 'PAGUHI': 'hydro',\n 'PAMODI01': 'oil',\n 'PARATG01': 'gas',\n 'PARATG02': 'gas',\n 'PATATG01': 'gas',\n 'PATATG02': 'gas',\n 'PATATV01': 'gas',\n 'PBANHI': 'hydro',\n 'PEHUDI01': 'oil',\n 'PERZDI01': 'oil',\n 'PERZDI02': 'oil',\n 'PERZDI03': 'oil',\n 'PERZDI04': 
'oil',\n 'PERZDI05': 'oil',\n 'PERZDI06': 'oil',\n 'PERZDI07': 'oil',\n 'PERZDI08': 'oil',\n 'PESPTV01': 'gas',\n 'PHDZTG01': 'gas',\n 'PHUITG01': 'gas',\n 'PICADI01': 'oil',\n 'PILBDI01': 'oil',\n 'PILBDI02': 'oil',\n 'PILBDI03': 'oil',\n 'PILBDI04': 'oil',\n 'PILBDI05': 'oil',\n 'PILBDI06': 'oil',\n 'PILATG11': 'gas',\n 'PILATG12': 'gas',\n 'PILATV01': 'gas',\n 'PILATV02': 'gas',\n 'PILATV03': 'gas',\n 'PILATV04': 'gas',\n 'PILATV10': 'gas',\n 'PINATG07': 'gas',\n 'PINATG08': 'gas',\n 'PINATG09': 'gas',\n 'PINATG10': 'gas',\n 'PIQIDI01': 'oil',\n 'PIRADI01': 'oil',\n 'PMORHI': 'hydro',\n 'PNEGHI': 'hydro',\n 'PNUETV07': 'gas',\n 'PNUETV08': 'gas',\n 'PNUETV09': 'gas',\n 'POSAIN': 'hydro',\n 'PPATDI01': 'oil',\n 'PPLEHI': 'hydro',\n 'PPNOTG01': 'gas',\n 'PPNOTG02': 'gas',\n 'PROCDI01': 'oil',\n 'PROVTV01': 'gas',\n 'PTR1TG23': 'gas',\n 'PTR1TG24': 'gas',\n 'PTR1TG25': 'gas',\n 'PUPITV01': 'gas',\n 'PVIEHI': 'hydro',\n 'PZUEDI01': 'oil',\n 'QULLHI': 'hydro',\n 'RAFADI01': 'oil',\n 'RAW1EO': 'hydro',\n 'RAW2EO': 'hydro',\n 'RCEPDI01': 'oil',\n 'RCUATG02': 'gas',\n 'REALDI01': 'oil',\n 'REOLHI': 'hydro',\n 'RESCDI01': 'oil',\n 'RGDEHB': 'hydro',\n 'RHONHI': 'hydro',\n 'RICADI01': 'oil',\n 'ROCATG01': 'gas',\n 'ROJOTG01': 'gas',\n 'ROJOTG02': 'gas',\n 'ROJOTG03': 'gas',\n 'ROMEHI': 'hydro',\n 'RREYHI': 'hydro',\n 'RSAUDI01': 'oil',\n 'RTERTG01': 'gas',\n 'RTERTG02': 'gas',\n 'RUFIDI01': 'oil',\n 'SALOHI': 'hydro',\n 'SANADI01': 'oil',\n 'SANDHI': 'hydro',\n 'SARCTG21': 'gas',\n 'SARCTG22': 'gas',\n 'SARCTG23': 'gas',\n 'SCHADI01': 'oil',\n 'SCTPDI01': 'oil',\n 'SERTTG01': 'gas',\n 'SFR2DI01': 'oil',\n 'SFRATG01': 'gas',\n 'SFRATG02': 'gas',\n 'SGDEHIAR': 'hydro',\n 'SGUIHI': 'hydro',\n 'SHELTG01': 'gas',\n 'SJUAFV': 'hydro',\n 'SLTODI01': 'oil',\n 'SMANDI01': 'oil',\n 'SMARDI01': 'oil',\n 'SMIGDI01': 'oil',\n 'SMTUTG01': 'gas',\n 'SMTUTG02': 'gas',\n 'SMTUTV01': 'gas',\n 'SNICTV11': 'coal',\n 'SNICTV12': 'coal',\n 'SNICTV13': 'coal',\n 'SNICTV14': 'coal',\n 'SNICTV15': 'coal',\n 'SOESTG03': 'gas',\n 'SOLATG01': 'gas',\n 'SORRTV13': 'gas',\n 'SPE2DI01': 'oil',\n 'SPENDI01': 'oil',\n 'SPEVDI01': 'oil',\n 'SROQHI': 'hydro',\n 'SROSDI01': 'oil',\n 'SSALDI01': 'oil',\n 'SVICDI01': 'oil',\n 'TABATV01': 'gas',\n 'TANDTG01': 'gas',\n 'TANDTG02': 'gas',\n 'TANDTV01': 'gas',\n 'TARDDI01': 'oil',\n 'TELLDI01': 'oil',\n 'TERVDI01': 'oil',\n 'TIMBTG01': 'gas',\n 'TIMBTG02': 'gas',\n 'TIMBTV01': 'gas',\n 'TINODI01': 'oil',\n 'TORDEO': 'hydro',\n 'TUCUTG01': 'gas',\n 'TUCUTG02': 'gas',\n 'TUCUTV01': 'gas',\n 'TUNAHI': 'hydro',\n 'ULLUHI': 'hydro',\n 'VANGDI01': 'oil',\n 'VGADDI01': 'oil',\n 'VGEPDI01': 'oil',\n 'VGESTG11': 'gas',\n 'VGESTG14': 'gas',\n 'VGESTG16': 'gas',\n 'VGESTG18': 'gas',\n 'VIALDI01': 'oil',\n 'VMA2TG01': 'gas',\n 'VMA2TG02': 'gas',\n 'VMA2TG03': 'gas',\n 'VMA2TG04': 'gas',\n 'VMARTG01': 'gas',\n 'VMARTG02': 'gas',\n 'VMARTG03': 'gas',\n 'VOBLTG01': 'gas',\n 'VOBLTG02': 'gas',\n 'VOBLTV01': 'gas',\n 'VTUDDI01': 'oil',\n 'VTUEDI01': 'oil',\n 'YACYHI': 'hydro',\n 'YANQDI01': 'oil',\n 'YPFATG01': 'gas',\n 'ZAPATG01': 'gas',\n 'ZAPATG02': 'gas',\n 'ZAPATG03': 'gas',\n 'ZAPATG04': 'gas'\n}\n\n# URL's for thermal and hydro pages and data sources respectively.\n\nurl = ('http://portalweb.cammesa.com/MEMNet1/Pages/Informes%20por'\n '%20Categor%C3%ADa/Operativos/VisorReporteSinComDesp_minimal.aspx'\n '?hora=0&titulo=Despacho%20Generacion%20Termica&reportPath='\n 'http://lauzet:5000/MemNet1/ReportingServices/DespachoGeneracion'\n 'Termica.rdl--0--Despacho+Generaci%c3%b3n+T%c3%a9rmica')\n\nturl = 
('http://portalweb.cammesa.com/Reserved.ReportViewerWebControl.'\n 'axd?Culture=3082&UICulture=3082&ReportStack=1'\n '&OpType=ReportArea&Controller=ClientController'\n 'ctl00_ctl04_g_a581304b_aafc_4818_a4a1_e96f27a22246_ctl00_RepViewer'\n '&ZoomMode=Percent&ZoomPct=100&ReloadDocMap='\n 'true&SearchStartPage=0&LinkTarget=_top')\n\nhurl = ('http://portalweb.cammesa.com/memnet1/Pages/Informes%20por%20Categor'\n '%C3%ADa/Operativos/VisorReportesSinCom_minimal.aspx?hora=0&'\n 'titulo=Despacho%20Generacion%20Hidraulica&reportPath='\n 'http://lauzet:5000/MemNet1/ReportingServices/'\n 'DespachoGeneracionHidraulica.rdl--0--Despacho+Generaci%c3%b3n+Zona+'\n 'Hidr%c3%a1ulica')\n\nthurl = ('http://portalweb.cammesa.com/Reserved.ReportViewerWebControl.'\n 'axd?Culture=3082&UICulture=3082&ReportStack=1'\n '&OpType=ReportArea&Controller=ClientController'\n 'ctl00_ctl04_g_966166c3_db78_453e_9a34_83d2bb263ee4_''ctl00_RepViewer'\n '&ZoomMode=Percent&ZoomPct=100&ReloadDocMap='\n 'true&SearchStartPage=0&LinkTarget=_top')\n\ncammesa_url = 'http://portalweb.cammesa.com/default.aspx'\n\n\ndef webparser(req):\n \"\"\"Takes content from webpage and returns all text as a list of strings\"\"\"\n\n soup = BeautifulSoup(req.content, 'html.parser')\n figs = soup.find_all(\"div\", class_=\"r11\")\n data_table = [unicode(tag.get_text()) for tag in figs]\n\n return data_table\n\n\ndef fetch_price(zone_key='AR', session=None, target_datetime=None, logger=None):\n \"\"\"\n Requests the last known power price of a given country\n Arguments:\n zone_key (optional) -- used in case a parser is able to fetch multiple countries\n session (optional) -- request session passed in order to re-use an existing session\n Return:\n A dictionary in the form:\n {\n 'zoneKey': 'FR',\n 'currency': EUR,\n 'datetime': '2017-01-01T00:00:00Z',\n 'price': 0.0,\n 'source': 'mysource.com'\n }\n \"\"\"\n if target_datetime:\n raise NotImplementedError('This parser is not yet able to parse past dates')\n s = session or requests.Session()\n price_req = s.get(cammesa_url)\n psoup = BeautifulSoup(price_req.content, 'html.parser')\n find_price = psoup.find('td', class_=\"cssFuncionesLeft\", align=\"left\")\n\n try:\n price_text = find_price.getText()\n\n # Strip all whitespace and isolate number. Convert to float.\n price_nws = \"\".join(price_text.split())\n lprice = price_nws.rpartition(':')[2]\n rprice = lprice.split('[')[0]\n price = float(rprice.replace(',', '.'))\n\n except (AttributeError, ValueError):\n # Price element not present or no price stated.\n price = None\n\n datetime = arrow.now('UTC-3').floor('hour').datetime\n\n data = {\n 'zoneKey': zone_key,\n 'currency': 'ARS',\n 'datetime': datetime,\n 'price': price,\n 'source': 'portalweb.cammesa.com'\n }\n\n return data\n\n\ndef get_datetime(session=None):\n \"\"\"\n Generation data is updated hourly. Makes request then finds most recent hour available.\n Returns an arrow datetime object using UTC-3 for timezone and zero for minutes and seconds.\n \"\"\"\n\n # Argentina does not currently observe daylight savings time. 
This may change from year to year!\n # https://en.wikipedia.org/wiki/Time_in_Argentina\n s = session or requests.Session()\n rt = s.get(url)\n timesoup = BeautifulSoup(rt.content, 'html.parser')\n find_hour = timesoup.find(\"option\", selected=\"selected\", value=\"1\").getText()\n at = arrow.now('UTC-3').floor('hour')\n datetime = (at.replace(hour=int(find_hour), minute=0, second=0)).datetime\n\n return {'datetime': datetime}\n\n\ndef dataformat(junk):\n \"\"\"Takes string data with only digits and returns it as a float.\"\"\"\n\n formatted = []\n for item in junk:\n if not any(char in item for char in string.ascii_letters):\n item = float(item.replace(',', '.'))\n formatted.append(item)\n\n return formatted\n\n\ndef get_thermal(session, logger):\n \"\"\"\n Requests thermal generation data then parses and sorts by type. Nuclear is included.\n Returns a dictionary.\n \"\"\"\n\n # Need to persist session in order to get ControlID and ReportSession so we can send second request\n # for table data. Both these variables change on each new request.\n s = session or requests.Session()\n r = s.get(url)\n pat = re.search(\"ControlID=[^&]*\", r.text).group()\n spat = re.search(\"ReportSession=[^&]*\", r.text).group()\n cid = pat.rpartition('=')[2]\n rs = spat.rpartition('=')[2]\n full_table = []\n\n # 'En Reserva' plants are not generating and can be ignored.\n # The table has an extra column on 'Costo Operativo' page which must be removed to find power generated correctly.\n\n pagenumber = 1\n reserves = False\n\n while not reserves:\n t = s.get(turl, params={'ControlID': cid, 'ReportSession': rs,\n 'PageNumber': '{}'.format(pagenumber)})\n text_only = webparser(t)\n if 'Estado' in text_only:\n for item in text_only:\n if len(item) == 1 and item in string.ascii_letters:\n text_only.remove(item)\n if 'En Reserva' in text_only:\n reserves = True\n continue\n full_table.append(text_only)\n pagenumber += 1\n\n data = list(itertools.chain.from_iterable(full_table))\n formatted_data = dataformat(data)\n mapped_data = [power_plant_type.get(x, x) for x in formatted_data]\n\n for item in mapped_data:\n try:\n # avoids including titles and headings\n if all((item.isupper(), not item.isalpha(), ' ' not in item)):\n logger.warning(\n '{} is missing from the AR plant mapping!'.format(item),\n extra={'key': 'AR'})\n except AttributeError:\n # not a string....\n continue\n\n find_totals = [i + 1 for i, x in enumerate(mapped_data) if x == 'Totales ']\n thermal_generation = sum([mapped_data[i] for i in find_totals])\n\n find_nuclear = [i + 2 for i, x in enumerate(mapped_data) if x == 'nuclear']\n nuclear_generation = sum([mapped_data[i] for i in find_nuclear])\n find_oil = [i + 2 for i, x in enumerate(mapped_data) if x == 'oil']\n oil_generation = sum([mapped_data[i] for i in find_oil])\n find_coal = [i + 2 for i, x in enumerate(mapped_data) if x == 'coal']\n coal_generation = sum([mapped_data[i] for i in find_coal])\n find_biomass = [i + 2 for i, x in enumerate(mapped_data) if x == 'biomass']\n biomass_generation = sum([mapped_data[i] for i in find_biomass])\n find_gas = [i + 2 for i, x in enumerate(mapped_data) if x == 'gas']\n gas_generation = sum([mapped_data[i] for i in find_gas])\n\n unknown_generation = (thermal_generation - nuclear_generation - gas_generation\n - oil_generation - coal_generation - biomass_generation)\n\n if unknown_generation < 0.0:\n unknown_generation = 0.0\n\n return {'gas': gas_generation,\n 'nuclear': nuclear_generation,\n 'coal': coal_generation,\n 'unknown': unknown_generation,\n 
'oil': oil_generation,\n 'biomass': biomass_generation\n }\n\n\ndef get_hydro(session=None):\n \"\"\"Requests hydro generation data then parses, returns a dictionary.\"\"\"\n\n s = session or requests.Session()\n r = s.get(hurl)\n pat = re.search(\"ControlID=[^&]*\", r.text).group()\n spat = re.search(\"ReportSession=[^&]*\", r.text).group()\n cid = pat.rpartition('=')[2]\n rs = spat.rpartition('=')[2]\n full_table = []\n\n pagenumber = 1\n reserves = False\n\n while not reserves:\n t = s.get(thurl, params={'ControlID': cid, 'ReportSession': rs,\n 'PageNumber': '{}'.format(pagenumber)})\n text_only = webparser(t)\n if 'En Reserva' in text_only:\n reserves = True\n continue\n full_table.append(text_only)\n pagenumber += 1\n\n data = list(itertools.chain.from_iterable(full_table))\n formatted_data = dataformat(data)\n find_hydro = [i + 1 for i, x in enumerate(formatted_data) if x == 'Totales ']\n total_hydro_generation = sum([formatted_data[i] for i in find_hydro])\n\n return {'hydro': total_hydro_generation}\n\n\ndef fetch_production(zone_key='AR', session=None, target_datetime=None, logger=None):\n \"\"\"\n Requests the last known production mix (in MW) of a given country\n Arguments:\n zone_key (optional) -- used in case a parser is able to fetch multiple countries\n target_datetime: if we want to parser for a specific time and not latest\n logger: where to log useful information\n Return:\n A dictionary in the form:\n {\n 'zoneKey': 'FR',\n 'datetime': '2017-01-01T00:00:00Z',\n 'production': {\n 'biomass': 0.0,\n 'coal': 0.0,\n 'gas': 0.0,\n 'hydro': 0.0,\n 'nuclear': null,\n 'oil': 0.0,\n 'solar': 0.0,\n 'wind': 0.0,\n 'geothermal': 0.0,\n 'unknown': 0.0\n },\n 'storage': {\n 'hydro': -10.0,\n },\n 'source': 'mysource.com'\n }\n \"\"\"\n if target_datetime is not None:\n raise NotImplementedError('This parser is not yet able to parse past dates')\n\n gdt = get_datetime(session=None)\n thermal = get_thermal(session, logger)\n hydro = get_hydro(session=None)\n production_mix = {\n 'zoneKey': zone_key,\n 'datetime': gdt['datetime'],\n 'production': {\n 'biomass': thermal.get('biomass', 0.0),\n 'coal': thermal.get('coal', 0.0),\n 'gas': thermal.get('gas', 0.0),\n 'hydro': hydro.get('hydro', 0.0),\n 'nuclear': thermal.get('nuclear', 0.0),\n 'oil': thermal.get('oil', 0.0),\n 'solar': None,\n 'wind': None,\n 'geothermal': 0.0,\n 'unknown': thermal.get('unknown', 0.0)\n },\n 'storage': {\n 'hydro': None,\n },\n 'source': 'portalweb.cammesa.com'\n }\n\n return production_mix\n\n\nif __name__ == '__main__':\n \"\"\"Main method, never used by the Electricity Map backend, but handy for testing.\"\"\"\n\n print('fetch_production() ->')\n print(fetch_production())\n print('fetch_price() ->')\n print(fetch_price())\n",
"path": "parsers/AR.py"
}
] | diff --git a/parsers/AR.py b/parsers/AR.py
index 30e0ac7d78..76f79d8432 100644
--- a/parsers/AR.py
+++ b/parsers/AR.py
@@ -43,6 +43,10 @@
'ALEMDI01': 'oil',
'ALICHI': 'hydro',
'ALOMDI01': 'gas',
+ 'ALUATG06': 'gas',
+ 'ALUATG07': 'gas',
+ 'ALUATG08': 'gas',
+ 'ALUATV01': 'gas',
'ALUMDI01': 'oil',
'AMEGHI': 'hydro',
'ANATDI01': 'gas',
|
pyca__cryptography-3731 | release infrastructure doesn't handle "out of order" releases
Specifically, if we issue a `0.X` release, then a `0.X+1` release, and then go to do a `0.X.1` release, the wheel automation won't work, since it always builds wheels for the latest release on PyPI rather than the version just tagged.
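The fix below parameterizes the Jenkins wheel-builder job with a `BUILD_VERSION` string and pins it in the build steps (`pip wheel cryptography==$BUILD_VERSION`), so an out-of-order release builds the requested version instead of the latest. A minimal sketch of the release-side half, inside `release()` in `release.py`:

```python
# Sketch: pass the version being released to Jenkins as a build
# parameter, so the builders pin it instead of building whatever the
# latest PyPI release happens to be. Matches the patch below.
response = session.get(
    "{0}/build".format(JENKINS_URL),
    params={
        "token": token,
        "BUILD_VERSION": version,
        "cause": "Building wheels for {0}".format(version),
    },
)
response.raise_for_status()
```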
| [
{
"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\nfrom __future__ import absolute_import, division, print_function\n\nimport getpass\nimport io\nimport os\nimport subprocess\nimport time\n\nimport click\n\nfrom clint.textui.progress import Bar as ProgressBar\n\nimport requests\n\n\nJENKINS_URL = (\n \"https://ci.cryptography.io/job/cryptography-support-jobs/\"\n \"job/wheel-builder\"\n)\n\n\ndef run(*args, **kwargs):\n kwargs.setdefault(\"stderr\", subprocess.STDOUT)\n try:\n subprocess.check_output(list(args), **kwargs)\n except subprocess.CalledProcessError as e:\n # Reraise this with a different type so that str(e) is something with\n # stdout in it.\n raise Exception(e.cmd, e.returncode, e.output)\n\n\ndef wait_for_build_completed(session):\n # Wait 20 seconds before actually checking if the build is complete, to\n # ensure that it had time to really start.\n time.sleep(20)\n while True:\n response = session.get(\n \"{0}/lastBuild/api/json/\".format(JENKINS_URL),\n headers={\n \"Accept\": \"application/json\",\n }\n )\n response.raise_for_status()\n if not response.json()[\"building\"]:\n assert response.json()[\"result\"] == \"SUCCESS\"\n break\n time.sleep(0.1)\n\n\ndef download_artifacts(session):\n response = session.get(\n \"{0}/lastBuild/api/json/\".format(JENKINS_URL),\n headers={\n \"Accept\": \"application/json\"\n }\n )\n response.raise_for_status()\n json_response = response.json()\n assert not json_response[\"building\"]\n assert json_response[\"result\"] == \"SUCCESS\"\n\n paths = []\n\n for artifact in json_response[\"artifacts\"]:\n response = session.get(\n \"{0}artifact/{1}\".format(\n json_response[\"url\"], artifact[\"relativePath\"]\n ), stream=True\n )\n assert response.headers[\"content-length\"]\n print(\"Downloading {0}\".format(artifact[\"fileName\"]))\n bar = ProgressBar(\n expected_size=int(response.headers[\"content-length\"]),\n filled_char=\"=\"\n )\n content = io.BytesIO()\n for data in response.iter_content(chunk_size=8192):\n content.write(data)\n bar.show(content.tell())\n assert bar.expected_size == content.tell()\n bar.done()\n out_path = os.path.join(\n os.path.dirname(__file__),\n \"dist\",\n artifact[\"fileName\"],\n )\n with open(out_path, \"wb\") as f:\n f.write(content.getvalue())\n paths.append(out_path)\n return paths\n\n\[email protected]()\[email protected](\"version\")\ndef release(version):\n \"\"\"\n ``version`` should be a string like '0.4' or '1.0'.\n \"\"\"\n run(\"git\", \"tag\", \"-s\", version, \"-m\", \"{0} release\".format(version))\n run(\"git\", \"push\", \"--tags\")\n\n run(\"python\", \"setup.py\", \"sdist\")\n run(\"python\", \"setup.py\", \"sdist\", \"bdist_wheel\", cwd=\"vectors/\")\n\n run(\n \"twine\", \"upload\", \"-s\", \"dist/cryptography-{0}*\".format(version),\n \"vectors/dist/cryptography_vectors-{0}*\".format(version), shell=True\n )\n\n session = requests.Session()\n\n # This tells the CDN to delete the cached response for the URL. 
We do this\n # so that the Jenkins builders will see the new sdist immediately when they\n # go to build the wheels.\n response = session.request(\n \"PURGE\", \"https://pypi.python.org/simple/cryptography/\"\n )\n response.raise_for_status()\n\n token = getpass.getpass(\"Input the Jenkins token: \")\n response = session.get(\n \"{0}/build\".format(JENKINS_URL),\n params={\n \"token\": token,\n \"cause\": \"Building wheels for {0}\".format(version)\n }\n )\n response.raise_for_status()\n wait_for_build_completed(session)\n paths = download_artifacts(session)\n run(\"twine\", \"upload\", \" \".join(paths))\n\n\nif __name__ == \"__main__\":\n release()\n",
"path": "release.py"
}
] | [
{
"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\nfrom __future__ import absolute_import, division, print_function\n\nimport getpass\nimport io\nimport os\nimport subprocess\nimport time\n\nimport click\n\nfrom clint.textui.progress import Bar as ProgressBar\n\nimport requests\n\n\nJENKINS_URL = (\n \"https://ci.cryptography.io/job/cryptography-support-jobs/\"\n \"job/wheel-builder\"\n)\n\n\ndef run(*args, **kwargs):\n kwargs.setdefault(\"stderr\", subprocess.STDOUT)\n try:\n subprocess.check_output(list(args), **kwargs)\n except subprocess.CalledProcessError as e:\n # Reraise this with a different type so that str(e) is something with\n # stdout in it.\n raise Exception(e.cmd, e.returncode, e.output)\n\n\ndef wait_for_build_completed(session):\n # Wait 20 seconds before actually checking if the build is complete, to\n # ensure that it had time to really start.\n time.sleep(20)\n while True:\n response = session.get(\n \"{0}/lastBuild/api/json/\".format(JENKINS_URL),\n headers={\n \"Accept\": \"application/json\",\n }\n )\n response.raise_for_status()\n if not response.json()[\"building\"]:\n assert response.json()[\"result\"] == \"SUCCESS\"\n break\n time.sleep(0.1)\n\n\ndef download_artifacts(session):\n response = session.get(\n \"{0}/lastBuild/api/json/\".format(JENKINS_URL),\n headers={\n \"Accept\": \"application/json\"\n }\n )\n response.raise_for_status()\n json_response = response.json()\n assert not json_response[\"building\"]\n assert json_response[\"result\"] == \"SUCCESS\"\n\n paths = []\n\n for artifact in json_response[\"artifacts\"]:\n response = session.get(\n \"{0}artifact/{1}\".format(\n json_response[\"url\"], artifact[\"relativePath\"]\n ), stream=True\n )\n assert response.headers[\"content-length\"]\n print(\"Downloading {0}\".format(artifact[\"fileName\"]))\n bar = ProgressBar(\n expected_size=int(response.headers[\"content-length\"]),\n filled_char=\"=\"\n )\n content = io.BytesIO()\n for data in response.iter_content(chunk_size=8192):\n content.write(data)\n bar.show(content.tell())\n assert bar.expected_size == content.tell()\n bar.done()\n out_path = os.path.join(\n os.path.dirname(__file__),\n \"dist\",\n artifact[\"fileName\"],\n )\n with open(out_path, \"wb\") as f:\n f.write(content.getvalue())\n paths.append(out_path)\n return paths\n\n\[email protected]()\[email protected](\"version\")\ndef release(version):\n \"\"\"\n ``version`` should be a string like '0.4' or '1.0'.\n \"\"\"\n run(\"git\", \"tag\", \"-s\", version, \"-m\", \"{0} release\".format(version))\n run(\"git\", \"push\", \"--tags\")\n\n run(\"python\", \"setup.py\", \"sdist\")\n run(\"python\", \"setup.py\", \"sdist\", \"bdist_wheel\", cwd=\"vectors/\")\n\n run(\n \"twine\", \"upload\", \"-s\", \"dist/cryptography-{0}*\".format(version),\n \"vectors/dist/cryptography_vectors-{0}*\".format(version), shell=True\n )\n\n session = requests.Session()\n\n # This tells the CDN to delete the cached response for the URL. 
We do this\n # so that the Jenkins builders will see the new sdist immediately when they\n # go to build the wheels.\n response = session.request(\n \"PURGE\", \"https://pypi.python.org/simple/cryptography/\"\n )\n response.raise_for_status()\n\n token = getpass.getpass(\"Input the Jenkins token: \")\n response = session.get(\n \"{0}/build\".format(JENKINS_URL),\n params={\n \"token\": token,\n \"BUILD_VERSION\": version,\n \"cause\": \"Building wheels for {0}\".format(version)\n }\n )\n response.raise_for_status()\n wait_for_build_completed(session)\n paths = download_artifacts(session)\n run(\"twine\", \"upload\", \" \".join(paths))\n\n\nif __name__ == \"__main__\":\n release()\n",
"path": "release.py"
}
] | diff --git a/.jenkins/Jenkinsfile-cryptography-wheel-builder b/.jenkins/Jenkinsfile-cryptography-wheel-builder
index 55429ca473c3..d06e6efa6ca8 100644
--- a/.jenkins/Jenkinsfile-cryptography-wheel-builder
+++ b/.jenkins/Jenkinsfile-cryptography-wheel-builder
@@ -1,3 +1,10 @@
+properties([
+ parameters([
+ string(defaultValue: '', description: 'The version from PyPI to build', name: 'BUILD_VERSION')
+ ]),
+ pipelineTriggers([])
+])
+
def configs = [
[
label: 'windows',
@@ -59,7 +66,7 @@ def build(version, label) {
virtualenv -p %PYTHON% .release
call .release\\Scripts\\activate
pip install wheel virtualenv
- pip wheel cryptography --wheel-dir=wheelhouse --no-binary cryptography
+ pip wheel cryptography==$BUILD_VERSION --wheel-dir=wheelhouse --no-binary cryptography
pip install -f wheelhouse cryptography --no-index
python -c "from cryptography.hazmat.backends.openssl.backend import backend;print('Loaded: ' + backend.openssl_version_text());print('Linked Against: ' + backend._ffi.string(backend._lib.OPENSSL_VERSION_TEXT).decode('ascii'))"
"""
@@ -88,7 +95,7 @@ def build(version, label) {
source .venv/bin/activate
pip install -U wheel # upgrade wheel to latest before we use it to build the wheel
# -mmacosx-version-min=10.9 can be remove when https://github.com/pyca/cryptography/issues/3635 is resolved
- CRYPTOGRAPHY_SUPPRESS_LINK_FLAGS="1" LDFLAGS="/usr/local/opt/[email protected]/lib/libcrypto.a /usr/local/opt/[email protected]/lib/libssl.a" CFLAGS="-I/usr/local/opt/[email protected]/include -mmacosx-version-min=10.9" pip wheel cryptography --wheel-dir=wheelhouse --no-binary cryptography
+ CRYPTOGRAPHY_SUPPRESS_LINK_FLAGS="1" LDFLAGS="/usr/local/opt/[email protected]/lib/libcrypto.a /usr/local/opt/[email protected]/lib/libssl.a" CFLAGS="-I/usr/local/opt/[email protected]/include -mmacosx-version-min=10.9" pip wheel cryptography==$BUILD_VERSION --wheel-dir=wheelhouse --no-binary cryptography
pip install -f wheelhouse cryptography --no-index
python -c "from cryptography.hazmat.backends.openssl.backend import backend;print('Loaded: ' + backend.openssl_version_text());print('Linked Against: ' + backend._ffi.string(backend._lib.OPENSSL_VERSION_TEXT).decode('ascii'))"
otool -L `find .venv -name '_openssl*.so'`
diff --git a/release.py b/release.py
index ec2e8f721202..0894d23fda2b 100644
--- a/release.py
+++ b/release.py
@@ -126,6 +126,7 @@ def release(version):
"{0}/build".format(JENKINS_URL),
params={
"token": token,
+ "BUILD_VERSION": version,
"cause": "Building wheels for {0}".format(version)
}
)
|
hedyorg__hedy-467 | Sometimes a "server error" HTML page is served with a 200 response code
As discovered in #464, when an internal server error is triggered by submitting a program to `/parse` without supplying a cookie, the error page comes back with a 200 status:
```
$ curl 'https://hedy-alpha.herokuapp.com/parse' \
-H 'Content-Type: application/json' \
--data-raw '{"level":"1","code":"print hallo wereld\u0021","lang":"nl"}' \
--compressed -vv
< HTTP/1.1 200 OK
...
<h1>500 Internal Server Error</h1>
```
As you can see, the response is served using a 200 response code. This leads to the error going "undetected" in a bunch of places:
* Not counted in Heroku's dashboard
* Not visible in the router log
Worst of all:
* No exception trace shown in the error log, so no way to debug the error.
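Since the app is Flask-based, one way to surface these again is an explicit error handler that logs the traceback and returns a real 500; a minimal, hypothetical sketch (not necessarily the project's eventual fix):

```python
import logging
import traceback

from flask import Flask

app = Flask(__name__)

@app.errorhandler(Exception)
def internal_server_error(exception):
    # Log the full traceback so the error is debuggable from the logs...
    logging.error(traceback.format_exc())
    # ...and return an actual 500 status so Heroku's dashboard and the
    # router log count it, instead of an error page served with 200.
    return '<h1>500 Internal Server Error</h1>', 500
```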
| [
{
"content": "import sys\nif (sys.version_info.major < 3 or sys.version_info.minor < 6):\n print ('Hedy requires Python 3.6 or newer to run. However, your version of Python is', '.'.join ([str (sys.version_info.major), str (sys.version_info.minor), str (sys.version_info.micro)]))\n quit ()\n\n# coding=utf-8\nimport datetime\nimport collections\nimport hedy\nimport json\nimport jsonbin\nimport logging\nimport os\nfrom os import path\nimport re\nimport traceback\nimport uuid\nfrom ruamel import yaml\nfrom flask_commonmark import Commonmark\nfrom werkzeug.urls import url_encode\nfrom config import config\nfrom auth import auth_templates, current_user, requires_login, is_admin, is_teacher\nfrom utils import db_get, db_get_many, db_create, db_update, timems, type_check, object_check, db_del, load_yaml, load_yaml_rt, dump_yaml_rt, version\nimport utils\n\n# app.py\nfrom flask import Flask, request, jsonify, session, abort, g, redirect, Response\nfrom flask_helpers import render_template\nfrom flask_compress import Compress\n\n# Hedy-specific modules\nimport courses\nimport hedyweb\nimport translating\nimport querylog\nimport aws_helpers\nimport ab_proxying\nimport cdn\n\n# Set the current directory to the root Hedy folder\nos.chdir(os.path.join (os.getcwd (), __file__.replace (os.path.basename (__file__), '')))\n\n# Define and load all available language data\nALL_LANGUAGES = {\n 'en': 'English',\n 'nl': 'Nederlands',\n 'es': 'Español',\n 'fr': 'Français',\n 'pt_br': 'Português',\n 'de': 'Deutsch',\n 'it': 'Italiano',\n 'sw': 'Swahili',\n 'hu': 'Magyar',\n 'el': 'Ελληνικά',\n \"zh\": \"简体中文\"\n}\n\nLEVEL_DEFAULTS = collections.defaultdict(courses.NoSuchDefaults)\nfor lang in ALL_LANGUAGES.keys():\n LEVEL_DEFAULTS[lang] = courses.LevelDefaults(lang)\n\nHEDY_COURSE = collections.defaultdict(courses.NoSuchCourse)\nfor lang in ALL_LANGUAGES.keys():\n HEDY_COURSE[lang] = courses.Course('hedy', lang, LEVEL_DEFAULTS[lang])\n\nSPACE_EU_COURSE = {'nl': courses.Course('space_eu', 'nl', LEVEL_DEFAULTS['nl']),\n 'en': courses.Course('space_eu', 'en', LEVEL_DEFAULTS['en']),\n 'es': courses.Course('space_eu', 'es', LEVEL_DEFAULTS['es'])\n }\n\nONLINE_MASTERS_COURSE = courses.Course('online_masters', 'nl', LEVEL_DEFAULTS['nl'])\n\nTRANSLATIONS = hedyweb.Translations()\n\ndef load_adventures_in_all_languages():\n adventures = {}\n for lang in ALL_LANGUAGES.keys ():\n adventures[lang] = load_yaml(f'coursedata/adventures/{lang}.yaml')\n return adventures\n\n\ndef load_adventure_for_language(lang):\n adventures = load_adventures_in_all_languages()\n if not lang in adventures or len (adventures [lang]) == 0:\n return adventures ['en']\n return adventures [lang]\n\n\ndef load_adventure_assignments_per_level(lang, level):\n\n loaded_programs = {}\n # If user is logged in, we iterate their programs that belong to the current level. 
Out of these, we keep the latest created program for both the level mode (no adventure) and for each of the adventures.\n if current_user (request) ['username']:\n user_programs = db_get_many ('programs', {'username': current_user (request) ['username']}, True)\n for program in user_programs:\n if program ['level'] != level:\n continue\n program_key = 'level' if not program.get ('adventure_name') else program ['adventure_name']\n if not program_key in loaded_programs:\n loaded_programs [program_key] = program\n elif loaded_programs [program_key] ['date'] < program ['date']:\n loaded_programs [program_key] = program\n\n assignments = []\n adventures = load_adventure_for_language(lang)['adventures']\n for short_name, adventure in adventures.items ():\n if not level in adventure['levels']:\n continue\n assignments.append({\n 'short_name': short_name,\n 'name': adventure['name'],\n 'image': adventure.get('image', None),\n 'default_save_name': adventure['default_save_name'],\n 'text': adventure['levels'][level].get('story_text', 'No Story Text'),\n 'start_code': adventure['levels'][level].get ('start_code', ''),\n 'loaded_program': '' if not loaded_programs.get (short_name) else loaded_programs.get (short_name) ['code'],\n 'loaded_program_name': '' if not loaded_programs.get (short_name) else loaded_programs.get (short_name) ['name']\n })\n # We create a 'level' pseudo assignment to store the loaded program for level mode, if any.\n assignments.append({\n 'short_name': 'level',\n 'loaded_program': '' if not loaded_programs.get ('level') else loaded_programs.get ('level') ['code'],\n 'loaded_program_name': '' if not loaded_programs.get ('level') else loaded_programs.get ('level') ['name']\n })\n return assignments\n\n# Load main menu (do it once, can be cached)\nwith open(f'main/menu.json', 'r', encoding='utf-8') as f:\n main_menu_json = json.load(f)\n\nlogging.basicConfig(\n level=logging.DEBUG,\n format='[%(asctime)s] %(levelname)-8s: %(message)s')\n\n\napp = Flask(__name__, static_url_path='')\n# Ignore trailing slashes in URLs\napp.url_map.strict_slashes = False\n\ncdn.Cdn(app, os.getenv('CDN_PREFIX'), os.getenv('HEROKU_SLUG_COMMIT', 'dev'))\n\n# Set session id if not already set. 
This must be done as one of the first things,\n# so the function should be defined high up.\[email protected]_request\ndef set_session_cookie():\n session_id()\n\nif os.getenv('IS_PRODUCTION'):\n @app.before_request\n def reject_e2e_requests():\n if utils.is_testing_request (request):\n return 'No E2E tests are allowed in production', 400\n\[email protected]_request\ndef before_request_proxy_testing():\n if utils.is_testing_request (request):\n if os.getenv ('IS_TEST_ENV'):\n session ['test_session'] = 'test'\n\n# HTTP -> HTTPS redirect\n# https://stackoverflow.com/questions/32237379/python-flask-redirect-to-https-from-http/32238093\nif os.getenv ('REDIRECT_HTTP_TO_HTTPS'):\n @app.before_request\n def before_request_https():\n if request.url.startswith('http://'):\n url = request.url.replace('http://', 'https://', 1)\n # We use a 302 in case we need to revert the redirect.\n return redirect(url, code=302)\n\n# Unique random key for sessions.\n# For settings with multiple workers, an environment variable is required, otherwise cookies will be constantly removed and re-set by different workers.\nif utils.is_production():\n if not os.getenv ('SECRET_KEY'):\n raise RuntimeError('The SECRET KEY must be provided for non-dev environments.')\n\n app.config['SECRET_KEY'] = os.getenv ('SECRET_KEY')\n\nelse:\n app.config['SECRET_KEY'] = os.getenv ('SECRET_KEY', uuid.uuid4().hex)\n\nif utils.is_heroku():\n app.config.update(\n SESSION_COOKIE_SECURE=True,\n SESSION_COOKIE_HTTPONLY=True,\n SESSION_COOKIE_SAMESITE='Lax',\n )\n\n# Set security attributes for cookies in a central place - but not when running locally, so that session cookies work well without HTTPS\n\nCompress(app)\nCommonmark(app)\nlogger = jsonbin.JsonBinLogger.from_env_vars()\nquerylog.LOG_QUEUE.set_transmitter(aws_helpers.s3_transmitter_from_env())\n\n# Check that requested language is supported, otherwise return 404\[email protected]_request\ndef check_language():\n if requested_lang() not in ALL_LANGUAGES.keys ():\n return \"Language \" + requested_lang () + \" not supported\", 404\n\nif utils.is_heroku() and not os.getenv('HEROKU_RELEASE_CREATED_AT'):\n logging.warning('Cannot determine release; enable Dyno metadata by running \"heroku labs:enable runtime-dyno-metadata -a <APP_NAME>\"')\n\n\[email protected]_request\ndef before_request_begin_logging():\n querylog.begin_global_log_record(path=request.path, method=request.method)\n\[email protected]_request\ndef after_request_log_status(response):\n querylog.log_value(http_code=response.status_code)\n return response\n\[email protected]_request\ndef teardown_request_finish_logging(exc):\n querylog.finish_global_log_record(exc)\n\n# If present, PROXY_TO_TEST_HOST should be the 'http[s]://hostname[:port]' of the target environment\nif os.getenv ('PROXY_TO_TEST_HOST') and not os.getenv ('IS_TEST_ENV'):\n ab_proxying.ABProxying(app, os.getenv ('PROXY_TO_TEST_HOST'), app.config['SECRET_KEY'])\n\[email protected]('/session_test', methods=['GET'])\ndef echo_session_vars_test():\n if not utils.is_testing_request (request):\n return 'This endpoint is only meant for E2E tests', 400\n return jsonify({'session': dict(session)})\n\[email protected]('/session_main', methods=['GET'])\ndef echo_session_vars_main():\n if not utils.is_testing_request (request):\n return 'This endpoint is only meant for E2E tests', 400\n return jsonify({'session': dict(session), 'proxy_enabled': bool (os.getenv ('PROXY_TO_TEST_HOST'))})\n\[email protected]('/parse', methods=['POST'])\ndef parse():\n body = request.json\n 
if not body:\n return \"body must be an object\", 400\n if 'code' not in body:\n return \"body.code must be a string\", 400\n if 'level' not in body:\n return \"body.level must be a string\", 400\n if 'sublevel' in body and not type_check (body ['sublevel'], 'int'):\n return \"If present, body.sublevel must be an integer\", 400\n if 'adventure_name' in body and not type_check (body ['adventure_name'], 'str'):\n return \"if present, body.adventure_name must be a string\", 400\n\n code = body ['code']\n level = int(body ['level'])\n sublevel = body.get ('sublevel') or 0\n\n # Language should come principally from the request body,\n # but we'll fall back to browser default if it's missing for whatever\n # reason.\n lang = body.get('lang', requested_lang())\n\n response = {}\n username = current_user(request) ['username'] or None\n\n querylog.log_value(level=level, lang=lang, session_id=session_id(), username=username)\n\n # Check if user sent code\n if not code:\n response[\"Error\"] = \"no code found, please send code.\"\n # is so, parse\n else:\n try:\n hedy_errors = TRANSLATIONS.get_translations(lang, 'HedyErrorMessages')\n with querylog.log_time('transpile'):\n result = hedy.transpile(code, level,sublevel)\n response[\"Code\"] = \"# coding=utf8\\nimport random\\n\" + result\n except hedy.HedyException as E:\n traceback.print_exc()\n # some 'errors' can be fixed, for these we throw an exception, but also\n # return fixed code, so it can be ran\n if E.args[0] == \"Invalid Space\":\n error_template = hedy_errors[E.error_code]\n response[\"Code\"] = \"# coding=utf8\\n\" + E.arguments['fixed_code']\n response[\"Warning\"] = error_template.format(**E.arguments)\n elif E.args[0] == \"Parse\":\n error_template = hedy_errors[E.error_code]\n # Localize the names of characters\n if 'character_found' in E.arguments:\n E.arguments['character_found'] = hedy_errors[E.arguments['character_found']]\n response[\"Error\"] = error_template.format(**E.arguments)\n elif E.args[0] == \"Unquoted Text\":\n error_template = hedy_errors[E.error_code]\n response[\"Error\"] = error_template.format(**E.arguments)\n else:\n error_template = hedy_errors[E.error_code]\n response[\"Error\"] = error_template.format(**E.arguments)\n except Exception as E:\n traceback.print_exc()\n print(f\"error transpiling {code}\")\n response[\"Error\"] = str(E)\n querylog.log_value(server_error=response.get('Error'))\n logger.log ({\n 'session': session_id(),\n 'date': str(datetime.datetime.now()),\n 'level': level,\n 'lang': lang,\n 'code': code,\n 'server_error': response.get('Error'),\n 'version': version(),\n 'username': username,\n 'is_test': 1 if os.getenv ('IS_TEST_ENV') else None,\n 'adventure_name': body.get('adventure_name', None)\n })\n\n return jsonify(response)\n\[email protected]('/report_error', methods=['POST'])\ndef report_error():\n post_body = request.json\n\n logger.log ({\n 'session': session_id(),\n 'date': str(datetime.datetime.now()),\n 'level': post_body.get('level'),\n 'code': post_body.get('code'),\n 'client_error': post_body.get('client_error'),\n 'version': version(),\n 'username': current_user(request) ['username'] or None,\n 'is_test': 1 if os.getenv ('IS_TEST_ENV') else None\n })\n\n return 'logged'\n\[email protected]('/version', methods=['GET'])\ndef version_page():\n \"\"\"\n Generate a page with some diagnostic information and a useful GitHub URL on upcoming changes.\n\n This is an admin-only page, it does not need to be linked.\n (Also does not have any sensitive information so it's fine to be 
unauthenticated).\n \"\"\"\n app_name = os.getenv('HEROKU_APP_NAME')\n\n vrz = os.getenv('HEROKU_RELEASE_CREATED_AT')\n the_date = datetime.date.fromisoformat(vrz[:10]) if vrz else datetime.date.today()\n\n commit = os.getenv('HEROKU_SLUG_COMMIT', '????')[0:6]\n\n return render_template('version-page.html',\n app_name=app_name,\n heroku_release_time=the_date,\n commit=commit)\n\n\ndef programs_page (request):\n username = current_user(request) ['username']\n if not username:\n return \"unauthorized\", 403\n\n from_user = request.args.get('user') or None\n if from_user and not is_admin (request):\n return \"unauthorized\", 403\n\n texts=TRANSLATIONS.data [requested_lang ()] ['Programs']\n ui=TRANSLATIONS.data [requested_lang ()] ['ui']\n adventures = load_adventure_for_language(requested_lang ())['adventures']\n\n result = db_get_many ('programs', {'username': from_user or username}, True)\n programs = []\n now = timems ()\n for item in result:\n measure = texts ['minutes']\n date = round ((now - item ['date']) / 60000)\n if date > 90:\n measure = texts ['hours']\n date = round (date / 60)\n if date > 36:\n measure = texts ['days']\n\n date = round (date / 24)\n\n programs.append ({'id': item ['id'], 'code': item ['code'], 'date': texts ['ago-1'] + ' ' + str (date) + ' ' + measure + ' ' + texts ['ago-2'], 'level': item ['level'], 'name': item ['name'], 'adventure_name': item.get ('adventure_name')})\n\n return render_template('programs.html', lang=requested_lang(), menu=render_main_menu('programs'), texts=texts, ui=ui, auth=TRANSLATIONS.data [requested_lang ()] ['Auth'], programs=programs, username=username, current_page='programs', from_user=from_user, adventures=adventures)\n\n# Adventure mode\[email protected]('/hedy/adventures', methods=['GET'])\ndef adventures_list():\n return render_template('adventures.html', lang=lang, adventures=load_adventure_for_language (requested_lang ()), menu=render_main_menu('adventures'), username=current_user(request) ['username'], auth=TRANSLATIONS.data [lang] ['Auth'])\n\[email protected]('/hedy/adventures/<adventure_name>', methods=['GET'], defaults={'level': 1})\[email protected]('/hedy/adventures/<adventure_name>/<level>', methods=['GET'])\ndef adventure_page(adventure_name, level):\n\n user = current_user (request)\n level = int (level)\n adventures = load_adventure_for_language (requested_lang ())\n\n # If requested adventure does not exist, return 404\n if not adventure_name in adventures ['adventures']:\n return 'No such Hedy adventure!', 404\n\n adventure = adventures ['adventures'] [adventure_name]\n\n # If no level is specified (this will happen if the last element of the path (minus the query parameter) is the same as the adventure_name)\n if re.sub (r'\\?.+', '', request.url.split ('/') [len (request.url.split ('/')) - 1]) == adventure_name:\n # If user is logged in, check if they have a program for this adventure\n # If there are many, note the highest level for which there is a saved program\n desired_level = 0\n if user ['username']:\n existing_programs = db_get_many ('programs', {'username': user ['username']}, True)\n for program in existing_programs:\n if 'adventure_name' in program and program ['adventure_name'] == adventure_name and program ['level'] > desired_level:\n desired_level = program ['level']\n # If the user has a saved program for this adventure, redirect them to the level with the highest adventure\n if desired_level != 0:\n return redirect(request.url.replace ('/' + adventure_name, '/' + adventure_name + '/' + str 
(desired_level)), code=302)\n # If user is not logged in, or has no saved programs for this adventure, default to the lowest level available for the adventure\n if desired_level == 0:\n for key in adventure ['levels'].keys ():\n if type_check (key, 'int') and (desired_level == 0 or desired_level > key):\n desired_level = key\n level = desired_level\n\n # If requested level is not in adventure, return 404\n if not level in adventure ['levels']:\n abort(404)\n\n adventure_assignments = load_adventure_assignments_per_level(requested_lang(), level)\n g.prefix = '/hedy'\n return hedyweb.render_assignment_editor(\n request=request,\n course=HEDY_COURSE[requested_lang()],\n level_number=level,\n assignment_number=1,\n menu=render_main_menu('hedy'),\n translations=TRANSLATIONS,\n version=version(),\n adventure_assignments=adventure_assignments,\n # The relevant loaded program will be available to client-side js and it will be loaded by js.\n loaded_program='',\n loaded_program_name='',\n adventure_name=adventure_name)\n\n# routing to index.html\[email protected]('/hedy', methods=['GET'], defaults={'level': '1', 'step': 1})\[email protected]('/hedy/<level>', methods=['GET'], defaults={'step': 1})\[email protected]('/hedy/<level>/<step>', methods=['GET'])\ndef index(level, step):\n\n\n # Sublevel requested\n if re.match ('\\d+-\\d+', level):\n pass\n # If level has a dash, we keep it as a string\n # Normal level requested\n elif re.match ('\\d', level):\n try:\n g.level = level = int(level)\n except:\n return 'No such Hedy level!', 404\n else:\n return 'No such Hedy level!', 404\n\n g.lang = requested_lang()\n g.prefix = '/hedy'\n\n loaded_program = ''\n loaded_program_name = ''\n adventure_name = ''\n\n # If step is a string that has more than two characters, it must be an id of a program\n if step and type_check (step, 'str') and len (step) > 2:\n result = db_get ('programs', {'id': step})\n if not result:\n return 'No such program', 404\n # Allow only the owner of the program, the admin user and the teacher users to access the program\n user = current_user (request)\n if user ['username'] != result ['username'] and not is_admin (request) and not is_teacher (request):\n return 'No such program!', 404\n loaded_program = result ['code']\n loaded_program_name = result ['name']\n if 'adventure_name' in result:\n adventure_name = result ['adventure_name']\n # We default to step 1 to provide a meaningful default assignment\n step = 1\n\n adventure_assignments = load_adventure_assignments_per_level(g.lang, level)\n\n return hedyweb.render_assignment_editor(\n request=request,\n course=HEDY_COURSE[g.lang],\n level_number=level,\n assignment_number=step,\n menu=render_main_menu('hedy'),\n translations=TRANSLATIONS,\n version=version(),\n adventure_assignments=adventure_assignments,\n loaded_program=loaded_program,\n loaded_program_name=loaded_program_name,\n adventure_name=adventure_name)\n\[email protected]('/onlinemasters', methods=['GET'], defaults={'level': 1, 'step': 1})\[email protected]('/onlinemasters/<level>', methods=['GET'], defaults={'step': 1})\[email protected]('/onlinemasters/<level>/<step>', methods=['GET'])\ndef onlinemasters(level, step):\n g.level = level = int(level)\n g.lang = lang = requested_lang()\n g.prefix = '/onlinemasters'\n\n adventure_assignments = load_adventure_assignments_per_level(g.lang, level)\n\n return hedyweb.render_assignment_editor(\n request=request,\n course=ONLINE_MASTERS_COURSE,\n level_number=level,\n assignment_number=step,\n translations=TRANSLATIONS,\n 
version=version(),\n menu=None,\n adventure_assignments=adventure_assignments,\n loaded_program='',\n loaded_program_name='',\n adventure_name='')\n\[email protected]('/space_eu', methods=['GET'], defaults={'level': 1, 'step': 1})\[email protected]('/space_eu/<level>', methods=['GET'], defaults={'step': 1})\[email protected]('/space_eu/<level>/<step>', methods=['GET'])\ndef space_eu(level, step):\n g.level = level = int(level)\n g.lang = requested_lang()\n g.prefix = '/space_eu'\n\n adventure_assignments = load_adventure_assignments_per_level(g.lang, level)\n\n return hedyweb.render_assignment_editor(\n request=request,\n course=SPACE_EU_COURSE[g.lang],\n level_number=level,\n assignment_number=step,\n translations=TRANSLATIONS,\n version=version(),\n menu=None,\n adventure_assignments=adventure_assignments,\n loaded_program='',\n loaded_program_name='',\n adventure_name='')\n\n\n\[email protected]('/error_messages.js', methods=['GET'])\ndef error():\n error_messages = TRANSLATIONS.get_translations(requested_lang(), \"ClientErrorMessages\")\n return render_template(\"error_messages.js\", error_messages=json.dumps(error_messages))\n\n\[email protected](500)\ndef internal_error(exception):\n import traceback\n print(traceback.format_exc())\n return \"<h1>500 Internal Server Error</h1>\"\n\[email protected]('/index.html')\[email protected]('/')\ndef default_landing_page():\n return main_page('start')\n\[email protected]('/<page>')\ndef main_page(page):\n if page == 'favicon.ico':\n abort(404)\n\n lang = requested_lang()\n effective_lang = lang\n\n if page in ['signup', 'login', 'my-profile', 'recover', 'reset', 'admin']:\n return auth_templates(page, lang, render_main_menu(page), request)\n\n if page == 'programs':\n return programs_page(request)\n\n # Default to English if requested language is not available\n if not path.isfile(f'main/{page}-{effective_lang}.md'):\n effective_lang = 'en'\n\n try:\n with open(f'main/{page}-{effective_lang}.md', 'r', encoding='utf-8') as f:\n contents = f.read()\n except IOError:\n abort(404)\n\n front_matter, markdown = split_markdown_front_matter(contents)\n\n menu = render_main_menu(page)\n return render_template('main-page.html', mkd=markdown, lang=lang, menu=menu, username=current_user(request) ['username'], auth=TRANSLATIONS.data [lang] ['Auth'], **front_matter)\n\n\ndef session_id():\n \"\"\"Returns or sets the current session ID.\"\"\"\n if 'session_id' not in session:\n if os.getenv ('IS_TEST_ENV') and 'X-session_id' in request.headers:\n session['session_id'] = request.headers ['X-session_id']\n else:\n session['session_id'] = uuid.uuid4().hex\n return session['session_id']\n\ndef requested_lang():\n \"\"\"Return the user's requested language code.\n\n If not in the request parameters, use the browser's accept-languages\n header to do language negotiation.\n \"\"\"\n lang = request.args.get(\"lang\")\n if lang: return lang\n\n return request.accept_languages.best_match(ALL_LANGUAGES.keys(), 'en')\n\[email protected]_global()\ndef current_language():\n return make_lang_obj(requested_lang())\n\[email protected]_global()\ndef hedy_link(level_nr, assignment_nr, subpage=None, lang=None):\n \"\"\"Make a link to a Hedy page.\"\"\"\n parts = [g.prefix]\n parts.append('/' + str(level_nr))\n if str(assignment_nr) != '1' or subpage:\n parts.append('/' + str(assignment_nr if assignment_nr else '1'))\n if subpage and subpage != 'code':\n parts.append('/' + subpage)\n parts.append('?')\n parts.append('lang=' + (lang if lang else requested_lang()))\n return 
''.join(parts)\n\[email protected]_global()\ndef other_languages():\n cl = requested_lang()\n return [make_lang_obj(l) for l in ALL_LANGUAGES.keys() if l != cl]\n\[email protected]_global()\ndef localize_link(url):\n lang = requested_lang()\n if not lang:\n return url\n return url + '?lang=' + lang\n\ndef make_lang_obj(lang):\n \"\"\"Make a language object for a given language.\"\"\"\n return {\n 'sym': ALL_LANGUAGES[lang],\n 'lang': lang\n }\n\n\[email protected]_global()\ndef modify_query(**new_values):\n args = request.args.copy()\n\n for key, value in new_values.items():\n args[key] = value\n\n return '{}?{}'.format(request.path, url_encode(args))\n\n\ndef no_none_sense(d):\n \"\"\"Remove all None values from a dict.\"\"\"\n return {k: v for k, v in d.items() if v is not None}\n\n\ndef split_markdown_front_matter(md):\n parts = re.split('^---', md, 1, re.M)\n if len(parts) == 1:\n return {}, md\n # safe_load returns 'None' if the string is empty\n front_matter = yaml.safe_load(parts[0]) or {}\n if not isinstance(front_matter, dict):\n # There was some kind of parsing error\n return {}, md\n\n return front_matter, parts[1]\n\n\ndef render_main_menu(current_page):\n \"\"\"Render a list of (caption, href, selected, color) from the main menu.\"\"\"\n return [dict(\n caption=item.get(requested_lang(), item.get('en', '???')),\n href='/' + item['_'],\n selected=(current_page == item['_']),\n accent_color=item.get('accent_color', 'white')\n ) for item in main_menu_json['nav']]\n\n# *** PROGRAMS ***\n\[email protected]('/programs_list', methods=['GET'])\n@requires_login\ndef list_programs (user):\n return {'programs': db_get_many ('programs', {'username': user ['username']}, True)}\n\n# Not very restful to use a GET to delete something, but indeed convenient; we can do it with a single link and avoiding AJAX.\[email protected]('/programs/delete/<program_id>', methods=['GET'])\n@requires_login\ndef delete_program (user, program_id):\n result = db_get ('programs', {'id': program_id})\n if not result or result ['username'] != user ['username']:\n return \"\", 404\n db_del ('programs', {'id': program_id})\n program_count = 0\n if 'program_count' in user:\n program_count = user ['program_count']\n db_update ('users', {'username': user ['username'], 'program_count': program_count - 1})\n return redirect ('/programs')\n\[email protected]('/programs', methods=['POST'])\n@requires_login\ndef save_program (user):\n\n body = request.json\n if not type_check (body, 'dict'):\n return 'body must be an object', 400\n if not object_check (body, 'code', 'str'):\n return 'code must be a string', 400\n if not object_check (body, 'name', 'str'):\n return 'name must be a string', 400\n if not object_check (body, 'level', 'int'):\n return 'level must be an integer', 400\n if 'adventure_name' in body:\n if not object_check (body, 'adventure_name', 'str'):\n return 'if present, adventure_name must be a string', 400\n\n # We execute the saved program to see if it would generate an error or not\n error = None\n try:\n hedy_errors = TRANSLATIONS.get_translations(requested_lang(), 'HedyErrorMessages')\n result = hedy.transpile(body ['code'], body ['level'])\n except hedy.HedyException as E:\n error_template = hedy_errors[E.error_code]\n error = error_template.format(**E.arguments)\n except Exception as E:\n error = str(E)\n\n name = body ['name']\n\n # If name ends with (N) or (NN), we strip them since it's very likely these addenda were added by our server to avoid overwriting existing programs.\n name = re.sub (' 
\\(\\d+\\)$', '', name)\n # We check if a program with a name `xyz` exists in the database for the username. If it does, we exist whether `xyz (1)` exists, until we find a program `xyz (NN)` that doesn't exist yet.\n # It'd be ideal to search by username & program name, but since DynamoDB doesn't allow searching for two indexes at the same time, this would require to create a special index to that effect, which is cumbersome.\n # For now, we bring all existing programs for the user and then search within them for repeated names.\n existing = db_get_many ('programs', {'username': user ['username']}, True)\n name_counter = 0\n for program in existing:\n if re.match ('^' + re.escape (name) + '( \\(\\d+\\))*', program ['name']):\n name_counter = name_counter + 1\n if name_counter:\n name = name + ' (' + str (name_counter) + ')'\n\n stored_program = {\n 'id': uuid.uuid4().hex,\n 'session': session_id(),\n 'date': timems (),\n 'lang': requested_lang(),\n 'version': version(),\n 'level': body ['level'],\n 'code': body ['code'],\n 'name': name,\n 'server_error': error,\n 'username': user ['username']\n }\n\n if 'adventure_name' in body:\n stored_program ['adventure_name'] = body ['adventure_name']\n\n db_create('programs', stored_program)\n\n program_count = 0\n if 'program_count' in user:\n program_count = user ['program_count']\n db_update('users', {'username': user ['username'], 'program_count': program_count + 1})\n\n return jsonify({'name': name})\n\[email protected]('/translate/<source>/<target>')\ndef translate_fromto(source, target):\n # FIXME: right now loading source file on demand. We might need to cache this...\n source_adventures = load_yaml(f'coursedata/adventures/{source}.yaml')\n source_levels = load_yaml(f'coursedata/level-defaults/{source}.yaml')\n source_texts = load_yaml(f'coursedata/texts/{source}.yaml')\n\n target_adventures = load_yaml(f'coursedata/adventures/{target}.yaml')\n target_levels = load_yaml(f'coursedata/level-defaults/{target}.yaml')\n target_texts = load_yaml(f'coursedata/texts/{target}.yaml')\n\n files = []\n\n files.append(translating.TranslatableFile(\n 'Levels',\n f'level-defaults/{target}.yaml',\n translating.struct_to_sections(source_levels, target_levels)))\n\n files.append(translating.TranslatableFile(\n 'Messages',\n f'texts/{target}.yaml',\n translating.struct_to_sections(source_texts, target_texts)))\n\n files.append(translating.TranslatableFile(\n 'Adventures',\n f'adventures/{target}.yaml',\n translating.struct_to_sections(source_adventures, target_adventures)))\n\n return render_template('translate-fromto.html',\n source_lang=source,\n target_lang=target,\n files=files)\n\[email protected]('/update_yaml', methods=['POST'])\ndef update_yaml():\n filename = path.join('coursedata', request.form['file'])\n # The file MUST point to something inside our 'coursedata' directory\n # (no exploiting bullshit here)\n filepath = path.abspath(filename)\n expected_path = path.abspath('coursedata')\n if not filepath.startswith(expected_path):\n raise RuntimeError('Are you trying to trick me?')\n\n data = load_yaml_rt(filepath)\n for key, value in request.form.items():\n if key.startswith('c:'):\n translating.apply_form_change(data, key[2:], translating.normalize_newlines(value))\n\n data = translating.normalize_yaml_blocks(data)\n\n return Response(dump_yaml_rt(data),\n mimetype='application/x-yaml',\n headers={'Content-disposition': 'attachment; filename=' + request.form['file'].replace('/', '-')})\n\n\n# *** AUTH ***\n\nimport auth\nauth.routes (app, 
requested_lang)\n\n# *** START SERVER ***\n\nif __name__ == '__main__':\n # Start the server on a developer machine. Flask is initialized in DEBUG mode, so it\n # hot-reloads files. We also flip our own internal \"debug mode\" flag to True, so our\n # own file loading routines also hot-reload.\n utils.set_debug_mode(True)\n\n # Threaded option enables multiple instances for multiple user access support\n app.run(threaded=True, debug=True, port=config ['port'], host=\"0.0.0.0\")\n\n # See `Procfile` for how the server is started on Heroku.\n",
"path": "app.py"
}
] | [
{
"content": "import sys\nif (sys.version_info.major < 3 or sys.version_info.minor < 6):\n print ('Hedy requires Python 3.6 or newer to run. However, your version of Python is', '.'.join ([str (sys.version_info.major), str (sys.version_info.minor), str (sys.version_info.micro)]))\n quit ()\n\n# coding=utf-8\nimport datetime\nimport collections\nimport hedy\nimport json\nimport jsonbin\nimport logging\nimport os\nfrom os import path\nimport re\nimport traceback\nimport uuid\nfrom ruamel import yaml\nfrom flask_commonmark import Commonmark\nfrom werkzeug.urls import url_encode\nfrom config import config\nfrom auth import auth_templates, current_user, requires_login, is_admin, is_teacher\nfrom utils import db_get, db_get_many, db_create, db_update, timems, type_check, object_check, db_del, load_yaml, load_yaml_rt, dump_yaml_rt, version\nimport utils\n\n# app.py\nfrom flask import Flask, request, jsonify, session, abort, g, redirect, Response\nfrom flask_helpers import render_template\nfrom flask_compress import Compress\n\n# Hedy-specific modules\nimport courses\nimport hedyweb\nimport translating\nimport querylog\nimport aws_helpers\nimport ab_proxying\nimport cdn\n\n# Set the current directory to the root Hedy folder\nos.chdir(os.path.join (os.getcwd (), __file__.replace (os.path.basename (__file__), '')))\n\n# Define and load all available language data\nALL_LANGUAGES = {\n 'en': 'English',\n 'nl': 'Nederlands',\n 'es': 'Español',\n 'fr': 'Français',\n 'pt_br': 'Português',\n 'de': 'Deutsch',\n 'it': 'Italiano',\n 'sw': 'Swahili',\n 'hu': 'Magyar',\n 'el': 'Ελληνικά',\n \"zh\": \"简体中文\"\n}\n\nLEVEL_DEFAULTS = collections.defaultdict(courses.NoSuchDefaults)\nfor lang in ALL_LANGUAGES.keys():\n LEVEL_DEFAULTS[lang] = courses.LevelDefaults(lang)\n\nHEDY_COURSE = collections.defaultdict(courses.NoSuchCourse)\nfor lang in ALL_LANGUAGES.keys():\n HEDY_COURSE[lang] = courses.Course('hedy', lang, LEVEL_DEFAULTS[lang])\n\nSPACE_EU_COURSE = {'nl': courses.Course('space_eu', 'nl', LEVEL_DEFAULTS['nl']),\n 'en': courses.Course('space_eu', 'en', LEVEL_DEFAULTS['en']),\n 'es': courses.Course('space_eu', 'es', LEVEL_DEFAULTS['es'])\n }\n\nONLINE_MASTERS_COURSE = courses.Course('online_masters', 'nl', LEVEL_DEFAULTS['nl'])\n\nTRANSLATIONS = hedyweb.Translations()\n\ndef load_adventures_in_all_languages():\n adventures = {}\n for lang in ALL_LANGUAGES.keys ():\n adventures[lang] = load_yaml(f'coursedata/adventures/{lang}.yaml')\n return adventures\n\n\ndef load_adventure_for_language(lang):\n adventures = load_adventures_in_all_languages()\n if not lang in adventures or len (adventures [lang]) == 0:\n return adventures ['en']\n return adventures [lang]\n\n\ndef load_adventure_assignments_per_level(lang, level):\n\n loaded_programs = {}\n # If user is logged in, we iterate their programs that belong to the current level. 
Out of these, we keep the latest created program for both the level mode (no adventure) and for each of the adventures.\n if current_user (request) ['username']:\n user_programs = db_get_many ('programs', {'username': current_user (request) ['username']}, True)\n for program in user_programs:\n if program ['level'] != level:\n continue\n program_key = 'level' if not program.get ('adventure_name') else program ['adventure_name']\n if not program_key in loaded_programs:\n loaded_programs [program_key] = program\n elif loaded_programs [program_key] ['date'] < program ['date']:\n loaded_programs [program_key] = program\n\n assignments = []\n adventures = load_adventure_for_language(lang)['adventures']\n for short_name, adventure in adventures.items ():\n if not level in adventure['levels']:\n continue\n assignments.append({\n 'short_name': short_name,\n 'name': adventure['name'],\n 'image': adventure.get('image', None),\n 'default_save_name': adventure['default_save_name'],\n 'text': adventure['levels'][level].get('story_text', 'No Story Text'),\n 'start_code': adventure['levels'][level].get ('start_code', ''),\n 'loaded_program': '' if not loaded_programs.get (short_name) else loaded_programs.get (short_name) ['code'],\n 'loaded_program_name': '' if not loaded_programs.get (short_name) else loaded_programs.get (short_name) ['name']\n })\n # We create a 'level' pseudo assignment to store the loaded program for level mode, if any.\n assignments.append({\n 'short_name': 'level',\n 'loaded_program': '' if not loaded_programs.get ('level') else loaded_programs.get ('level') ['code'],\n 'loaded_program_name': '' if not loaded_programs.get ('level') else loaded_programs.get ('level') ['name']\n })\n return assignments\n\n# Load main menu (do it once, can be cached)\nwith open(f'main/menu.json', 'r', encoding='utf-8') as f:\n main_menu_json = json.load(f)\n\nlogging.basicConfig(\n level=logging.DEBUG,\n format='[%(asctime)s] %(levelname)-8s: %(message)s')\n\n\napp = Flask(__name__, static_url_path='')\n# Ignore trailing slashes in URLs\napp.url_map.strict_slashes = False\n\ncdn.Cdn(app, os.getenv('CDN_PREFIX'), os.getenv('HEROKU_SLUG_COMMIT', 'dev'))\n\n# Set session id if not already set. 
This must be done as one of the first things,\n# so the function should be defined high up.\[email protected]_request\ndef set_session_cookie():\n session_id()\n\nif os.getenv('IS_PRODUCTION'):\n @app.before_request\n def reject_e2e_requests():\n if utils.is_testing_request (request):\n return 'No E2E tests are allowed in production', 400\n\[email protected]_request\ndef before_request_proxy_testing():\n if utils.is_testing_request (request):\n if os.getenv ('IS_TEST_ENV'):\n session ['test_session'] = 'test'\n\n# HTTP -> HTTPS redirect\n# https://stackoverflow.com/questions/32237379/python-flask-redirect-to-https-from-http/32238093\nif os.getenv ('REDIRECT_HTTP_TO_HTTPS'):\n @app.before_request\n def before_request_https():\n if request.url.startswith('http://'):\n url = request.url.replace('http://', 'https://', 1)\n # We use a 302 in case we need to revert the redirect.\n return redirect(url, code=302)\n\n# Unique random key for sessions.\n# For settings with multiple workers, an environment variable is required, otherwise cookies will be constantly removed and re-set by different workers.\nif utils.is_production():\n if not os.getenv ('SECRET_KEY'):\n raise RuntimeError('The SECRET KEY must be provided for non-dev environments.')\n\n app.config['SECRET_KEY'] = os.getenv ('SECRET_KEY')\n\nelse:\n app.config['SECRET_KEY'] = os.getenv ('SECRET_KEY', uuid.uuid4().hex)\n\nif utils.is_heroku():\n app.config.update(\n SESSION_COOKIE_SECURE=True,\n SESSION_COOKIE_HTTPONLY=True,\n SESSION_COOKIE_SAMESITE='Lax',\n )\n\n# Set security attributes for cookies in a central place - but not when running locally, so that session cookies work well without HTTPS\n\nCompress(app)\nCommonmark(app)\nlogger = jsonbin.JsonBinLogger.from_env_vars()\nquerylog.LOG_QUEUE.set_transmitter(aws_helpers.s3_transmitter_from_env())\n\n# Check that requested language is supported, otherwise return 404\[email protected]_request\ndef check_language():\n if requested_lang() not in ALL_LANGUAGES.keys ():\n return \"Language \" + requested_lang () + \" not supported\", 404\n\nif utils.is_heroku() and not os.getenv('HEROKU_RELEASE_CREATED_AT'):\n logging.warning('Cannot determine release; enable Dyno metadata by running \"heroku labs:enable runtime-dyno-metadata -a <APP_NAME>\"')\n\n\[email protected]_request\ndef before_request_begin_logging():\n querylog.begin_global_log_record(path=request.path, method=request.method)\n\[email protected]_request\ndef after_request_log_status(response):\n querylog.log_value(http_code=response.status_code)\n return response\n\[email protected]_request\ndef teardown_request_finish_logging(exc):\n querylog.finish_global_log_record(exc)\n\n# If present, PROXY_TO_TEST_HOST should be the 'http[s]://hostname[:port]' of the target environment\nif os.getenv ('PROXY_TO_TEST_HOST') and not os.getenv ('IS_TEST_ENV'):\n ab_proxying.ABProxying(app, os.getenv ('PROXY_TO_TEST_HOST'), app.config['SECRET_KEY'])\n\[email protected]('/session_test', methods=['GET'])\ndef echo_session_vars_test():\n if not utils.is_testing_request (request):\n return 'This endpoint is only meant for E2E tests', 400\n return jsonify({'session': dict(session)})\n\[email protected]('/session_main', methods=['GET'])\ndef echo_session_vars_main():\n if not utils.is_testing_request (request):\n return 'This endpoint is only meant for E2E tests', 400\n return jsonify({'session': dict(session), 'proxy_enabled': bool (os.getenv ('PROXY_TO_TEST_HOST'))})\n\[email protected]('/parse', methods=['POST'])\ndef parse():\n body = request.json\n 
if not body:\n return \"body must be an object\", 400\n if 'code' not in body:\n return \"body.code must be a string\", 400\n if 'level' not in body:\n return \"body.level must be a string\", 400\n if 'sublevel' in body and not type_check (body ['sublevel'], 'int'):\n return \"If present, body.sublevel must be an integer\", 400\n if 'adventure_name' in body and not type_check (body ['adventure_name'], 'str'):\n return \"if present, body.adventure_name must be a string\", 400\n\n code = body ['code']\n level = int(body ['level'])\n sublevel = body.get ('sublevel') or 0\n\n # Language should come principally from the request body,\n # but we'll fall back to browser default if it's missing for whatever\n # reason.\n lang = body.get('lang', requested_lang())\n\n response = {}\n username = current_user(request) ['username'] or None\n\n querylog.log_value(level=level, lang=lang, session_id=session_id(), username=username)\n\n # Check if user sent code\n if not code:\n response[\"Error\"] = \"no code found, please send code.\"\n # is so, parse\n else:\n try:\n hedy_errors = TRANSLATIONS.get_translations(lang, 'HedyErrorMessages')\n with querylog.log_time('transpile'):\n result = hedy.transpile(code, level,sublevel)\n response[\"Code\"] = \"# coding=utf8\\nimport random\\n\" + result\n except hedy.HedyException as E:\n traceback.print_exc()\n # some 'errors' can be fixed, for these we throw an exception, but also\n # return fixed code, so it can be ran\n if E.args[0] == \"Invalid Space\":\n error_template = hedy_errors[E.error_code]\n response[\"Code\"] = \"# coding=utf8\\n\" + E.arguments['fixed_code']\n response[\"Warning\"] = error_template.format(**E.arguments)\n elif E.args[0] == \"Parse\":\n error_template = hedy_errors[E.error_code]\n # Localize the names of characters\n if 'character_found' in E.arguments:\n E.arguments['character_found'] = hedy_errors[E.arguments['character_found']]\n response[\"Error\"] = error_template.format(**E.arguments)\n elif E.args[0] == \"Unquoted Text\":\n error_template = hedy_errors[E.error_code]\n response[\"Error\"] = error_template.format(**E.arguments)\n else:\n error_template = hedy_errors[E.error_code]\n response[\"Error\"] = error_template.format(**E.arguments)\n except Exception as E:\n traceback.print_exc()\n print(f\"error transpiling {code}\")\n response[\"Error\"] = str(E)\n querylog.log_value(server_error=response.get('Error'))\n logger.log ({\n 'session': session_id(),\n 'date': str(datetime.datetime.now()),\n 'level': level,\n 'lang': lang,\n 'code': code,\n 'server_error': response.get('Error'),\n 'version': version(),\n 'username': username,\n 'is_test': 1 if os.getenv ('IS_TEST_ENV') else None,\n 'adventure_name': body.get('adventure_name', None)\n })\n\n return jsonify(response)\n\[email protected]('/report_error', methods=['POST'])\ndef report_error():\n post_body = request.json\n\n logger.log ({\n 'session': session_id(),\n 'date': str(datetime.datetime.now()),\n 'level': post_body.get('level'),\n 'code': post_body.get('code'),\n 'client_error': post_body.get('client_error'),\n 'version': version(),\n 'username': current_user(request) ['username'] or None,\n 'is_test': 1 if os.getenv ('IS_TEST_ENV') else None\n })\n\n return 'logged'\n\[email protected]('/version', methods=['GET'])\ndef version_page():\n \"\"\"\n Generate a page with some diagnostic information and a useful GitHub URL on upcoming changes.\n\n This is an admin-only page, it does not need to be linked.\n (Also does not have any sensitive information so it's fine to be 
unauthenticated).\n \"\"\"\n app_name = os.getenv('HEROKU_APP_NAME')\n\n vrz = os.getenv('HEROKU_RELEASE_CREATED_AT')\n the_date = datetime.date.fromisoformat(vrz[:10]) if vrz else datetime.date.today()\n\n commit = os.getenv('HEROKU_SLUG_COMMIT', '????')[0:6]\n\n return render_template('version-page.html',\n app_name=app_name,\n heroku_release_time=the_date,\n commit=commit)\n\n\ndef programs_page (request):\n username = current_user(request) ['username']\n if not username:\n return \"unauthorized\", 403\n\n from_user = request.args.get('user') or None\n if from_user and not is_admin (request):\n return \"unauthorized\", 403\n\n texts=TRANSLATIONS.data [requested_lang ()] ['Programs']\n ui=TRANSLATIONS.data [requested_lang ()] ['ui']\n adventures = load_adventure_for_language(requested_lang ())['adventures']\n\n result = db_get_many ('programs', {'username': from_user or username}, True)\n programs = []\n now = timems ()\n for item in result:\n measure = texts ['minutes']\n date = round ((now - item ['date']) / 60000)\n if date > 90:\n measure = texts ['hours']\n date = round (date / 60)\n if date > 36:\n measure = texts ['days']\n\n date = round (date / 24)\n\n programs.append ({'id': item ['id'], 'code': item ['code'], 'date': texts ['ago-1'] + ' ' + str (date) + ' ' + measure + ' ' + texts ['ago-2'], 'level': item ['level'], 'name': item ['name'], 'adventure_name': item.get ('adventure_name')})\n\n return render_template('programs.html', lang=requested_lang(), menu=render_main_menu('programs'), texts=texts, ui=ui, auth=TRANSLATIONS.data [requested_lang ()] ['Auth'], programs=programs, username=username, current_page='programs', from_user=from_user, adventures=adventures)\n\n# Adventure mode\[email protected]('/hedy/adventures', methods=['GET'])\ndef adventures_list():\n return render_template('adventures.html', lang=lang, adventures=load_adventure_for_language (requested_lang ()), menu=render_main_menu('adventures'), username=current_user(request) ['username'], auth=TRANSLATIONS.data [lang] ['Auth'])\n\[email protected]('/hedy/adventures/<adventure_name>', methods=['GET'], defaults={'level': 1})\[email protected]('/hedy/adventures/<adventure_name>/<level>', methods=['GET'])\ndef adventure_page(adventure_name, level):\n\n user = current_user (request)\n level = int (level)\n adventures = load_adventure_for_language (requested_lang ())\n\n # If requested adventure does not exist, return 404\n if not adventure_name in adventures ['adventures']:\n return 'No such Hedy adventure!', 404\n\n adventure = adventures ['adventures'] [adventure_name]\n\n # If no level is specified (this will happen if the last element of the path (minus the query parameter) is the same as the adventure_name)\n if re.sub (r'\\?.+', '', request.url.split ('/') [len (request.url.split ('/')) - 1]) == adventure_name:\n # If user is logged in, check if they have a program for this adventure\n # If there are many, note the highest level for which there is a saved program\n desired_level = 0\n if user ['username']:\n existing_programs = db_get_many ('programs', {'username': user ['username']}, True)\n for program in existing_programs:\n if 'adventure_name' in program and program ['adventure_name'] == adventure_name and program ['level'] > desired_level:\n desired_level = program ['level']\n # If the user has a saved program for this adventure, redirect them to the level with the highest adventure\n if desired_level != 0:\n return redirect(request.url.replace ('/' + adventure_name, '/' + adventure_name + '/' + str 
(desired_level)), code=302)\n # If user is not logged in, or has no saved programs for this adventure, default to the lowest level available for the adventure\n if desired_level == 0:\n for key in adventure ['levels'].keys ():\n if type_check (key, 'int') and (desired_level == 0 or desired_level > key):\n desired_level = key\n level = desired_level\n\n # If requested level is not in adventure, return 404\n if not level in adventure ['levels']:\n abort(404)\n\n adventure_assignments = load_adventure_assignments_per_level(requested_lang(), level)\n g.prefix = '/hedy'\n return hedyweb.render_assignment_editor(\n request=request,\n course=HEDY_COURSE[requested_lang()],\n level_number=level,\n assignment_number=1,\n menu=render_main_menu('hedy'),\n translations=TRANSLATIONS,\n version=version(),\n adventure_assignments=adventure_assignments,\n # The relevant loaded program will be available to client-side js and it will be loaded by js.\n loaded_program='',\n loaded_program_name='',\n adventure_name=adventure_name)\n\n# routing to index.html\[email protected]('/hedy', methods=['GET'], defaults={'level': '1', 'step': 1})\[email protected]('/hedy/<level>', methods=['GET'], defaults={'step': 1})\[email protected]('/hedy/<level>/<step>', methods=['GET'])\ndef index(level, step):\n\n\n # Sublevel requested\n if re.match ('\\d+-\\d+', level):\n pass\n # If level has a dash, we keep it as a string\n # Normal level requested\n elif re.match ('\\d', level):\n try:\n g.level = level = int(level)\n except:\n return 'No such Hedy level!', 404\n else:\n return 'No such Hedy level!', 404\n\n g.lang = requested_lang()\n g.prefix = '/hedy'\n\n loaded_program = ''\n loaded_program_name = ''\n adventure_name = ''\n\n # If step is a string that has more than two characters, it must be an id of a program\n if step and type_check (step, 'str') and len (step) > 2:\n result = db_get ('programs', {'id': step})\n if not result:\n return 'No such program', 404\n # Allow only the owner of the program, the admin user and the teacher users to access the program\n user = current_user (request)\n if user ['username'] != result ['username'] and not is_admin (request) and not is_teacher (request):\n return 'No such program!', 404\n loaded_program = result ['code']\n loaded_program_name = result ['name']\n if 'adventure_name' in result:\n adventure_name = result ['adventure_name']\n # We default to step 1 to provide a meaningful default assignment\n step = 1\n\n adventure_assignments = load_adventure_assignments_per_level(g.lang, level)\n\n return hedyweb.render_assignment_editor(\n request=request,\n course=HEDY_COURSE[g.lang],\n level_number=level,\n assignment_number=step,\n menu=render_main_menu('hedy'),\n translations=TRANSLATIONS,\n version=version(),\n adventure_assignments=adventure_assignments,\n loaded_program=loaded_program,\n loaded_program_name=loaded_program_name,\n adventure_name=adventure_name)\n\[email protected]('/onlinemasters', methods=['GET'], defaults={'level': 1, 'step': 1})\[email protected]('/onlinemasters/<level>', methods=['GET'], defaults={'step': 1})\[email protected]('/onlinemasters/<level>/<step>', methods=['GET'])\ndef onlinemasters(level, step):\n g.level = level = int(level)\n g.lang = lang = requested_lang()\n g.prefix = '/onlinemasters'\n\n adventure_assignments = load_adventure_assignments_per_level(g.lang, level)\n\n return hedyweb.render_assignment_editor(\n request=request,\n course=ONLINE_MASTERS_COURSE,\n level_number=level,\n assignment_number=step,\n translations=TRANSLATIONS,\n 
version=version(),\n menu=None,\n adventure_assignments=adventure_assignments,\n loaded_program='',\n loaded_program_name='',\n adventure_name='')\n\[email protected]('/space_eu', methods=['GET'], defaults={'level': 1, 'step': 1})\[email protected]('/space_eu/<level>', methods=['GET'], defaults={'step': 1})\[email protected]('/space_eu/<level>/<step>', methods=['GET'])\ndef space_eu(level, step):\n g.level = level = int(level)\n g.lang = requested_lang()\n g.prefix = '/space_eu'\n\n adventure_assignments = load_adventure_assignments_per_level(g.lang, level)\n\n return hedyweb.render_assignment_editor(\n request=request,\n course=SPACE_EU_COURSE[g.lang],\n level_number=level,\n assignment_number=step,\n translations=TRANSLATIONS,\n version=version(),\n menu=None,\n adventure_assignments=adventure_assignments,\n loaded_program='',\n loaded_program_name='',\n adventure_name='')\n\n\n\[email protected]('/error_messages.js', methods=['GET'])\ndef error():\n error_messages = TRANSLATIONS.get_translations(requested_lang(), \"ClientErrorMessages\")\n return render_template(\"error_messages.js\", error_messages=json.dumps(error_messages))\n\n\[email protected](500)\ndef internal_error(exception):\n import traceback\n print(traceback.format_exc())\n return \"<h1>500 Internal Server Error</h1>\", 500\n\[email protected]('/index.html')\[email protected]('/')\ndef default_landing_page():\n return main_page('start')\n\[email protected]('/<page>')\ndef main_page(page):\n if page == 'favicon.ico':\n abort(404)\n\n lang = requested_lang()\n effective_lang = lang\n\n if page in ['signup', 'login', 'my-profile', 'recover', 'reset', 'admin']:\n return auth_templates(page, lang, render_main_menu(page), request)\n\n if page == 'programs':\n return programs_page(request)\n\n # Default to English if requested language is not available\n if not path.isfile(f'main/{page}-{effective_lang}.md'):\n effective_lang = 'en'\n\n try:\n with open(f'main/{page}-{effective_lang}.md', 'r', encoding='utf-8') as f:\n contents = f.read()\n except IOError:\n abort(404)\n\n front_matter, markdown = split_markdown_front_matter(contents)\n\n menu = render_main_menu(page)\n return render_template('main-page.html', mkd=markdown, lang=lang, menu=menu, username=current_user(request) ['username'], auth=TRANSLATIONS.data [lang] ['Auth'], **front_matter)\n\n\ndef session_id():\n \"\"\"Returns or sets the current session ID.\"\"\"\n if 'session_id' not in session:\n if os.getenv ('IS_TEST_ENV') and 'X-session_id' in request.headers:\n session['session_id'] = request.headers ['X-session_id']\n else:\n session['session_id'] = uuid.uuid4().hex\n return session['session_id']\n\ndef requested_lang():\n \"\"\"Return the user's requested language code.\n\n If not in the request parameters, use the browser's accept-languages\n header to do language negotiation.\n \"\"\"\n lang = request.args.get(\"lang\")\n if lang: return lang\n\n return request.accept_languages.best_match(ALL_LANGUAGES.keys(), 'en')\n\[email protected]_global()\ndef current_language():\n return make_lang_obj(requested_lang())\n\[email protected]_global()\ndef hedy_link(level_nr, assignment_nr, subpage=None, lang=None):\n \"\"\"Make a link to a Hedy page.\"\"\"\n parts = [g.prefix]\n parts.append('/' + str(level_nr))\n if str(assignment_nr) != '1' or subpage:\n parts.append('/' + str(assignment_nr if assignment_nr else '1'))\n if subpage and subpage != 'code':\n parts.append('/' + subpage)\n parts.append('?')\n parts.append('lang=' + (lang if lang else requested_lang()))\n return 
''.join(parts)\n\[email protected]_global()\ndef other_languages():\n cl = requested_lang()\n return [make_lang_obj(l) for l in ALL_LANGUAGES.keys() if l != cl]\n\[email protected]_global()\ndef localize_link(url):\n lang = requested_lang()\n if not lang:\n return url\n return url + '?lang=' + lang\n\ndef make_lang_obj(lang):\n \"\"\"Make a language object for a given language.\"\"\"\n return {\n 'sym': ALL_LANGUAGES[lang],\n 'lang': lang\n }\n\n\[email protected]_global()\ndef modify_query(**new_values):\n args = request.args.copy()\n\n for key, value in new_values.items():\n args[key] = value\n\n return '{}?{}'.format(request.path, url_encode(args))\n\n\ndef no_none_sense(d):\n \"\"\"Remove all None values from a dict.\"\"\"\n return {k: v for k, v in d.items() if v is not None}\n\n\ndef split_markdown_front_matter(md):\n parts = re.split('^---', md, 1, re.M)\n if len(parts) == 1:\n return {}, md\n # safe_load returns 'None' if the string is empty\n front_matter = yaml.safe_load(parts[0]) or {}\n if not isinstance(front_matter, dict):\n # There was some kind of parsing error\n return {}, md\n\n return front_matter, parts[1]\n\n\ndef render_main_menu(current_page):\n \"\"\"Render a list of (caption, href, selected, color) from the main menu.\"\"\"\n return [dict(\n caption=item.get(requested_lang(), item.get('en', '???')),\n href='/' + item['_'],\n selected=(current_page == item['_']),\n accent_color=item.get('accent_color', 'white')\n ) for item in main_menu_json['nav']]\n\n# *** PROGRAMS ***\n\[email protected]('/programs_list', methods=['GET'])\n@requires_login\ndef list_programs (user):\n return {'programs': db_get_many ('programs', {'username': user ['username']}, True)}\n\n# Not very restful to use a GET to delete something, but indeed convenient; we can do it with a single link and avoiding AJAX.\[email protected]('/programs/delete/<program_id>', methods=['GET'])\n@requires_login\ndef delete_program (user, program_id):\n result = db_get ('programs', {'id': program_id})\n if not result or result ['username'] != user ['username']:\n return \"\", 404\n db_del ('programs', {'id': program_id})\n program_count = 0\n if 'program_count' in user:\n program_count = user ['program_count']\n db_update ('users', {'username': user ['username'], 'program_count': program_count - 1})\n return redirect ('/programs')\n\[email protected]('/programs', methods=['POST'])\n@requires_login\ndef save_program (user):\n\n body = request.json\n if not type_check (body, 'dict'):\n return 'body must be an object', 400\n if not object_check (body, 'code', 'str'):\n return 'code must be a string', 400\n if not object_check (body, 'name', 'str'):\n return 'name must be a string', 400\n if not object_check (body, 'level', 'int'):\n return 'level must be an integer', 400\n if 'adventure_name' in body:\n if not object_check (body, 'adventure_name', 'str'):\n return 'if present, adventure_name must be a string', 400\n\n # We execute the saved program to see if it would generate an error or not\n error = None\n try:\n hedy_errors = TRANSLATIONS.get_translations(requested_lang(), 'HedyErrorMessages')\n result = hedy.transpile(body ['code'], body ['level'])\n except hedy.HedyException as E:\n error_template = hedy_errors[E.error_code]\n error = error_template.format(**E.arguments)\n except Exception as E:\n error = str(E)\n\n name = body ['name']\n\n # If name ends with (N) or (NN), we strip them since it's very likely these addenda were added by our server to avoid overwriting existing programs.\n name = re.sub (' 
\\(\\d+\\)$', '', name)\n # We check if a program with a name `xyz` exists in the database for the username. If it does, we exist whether `xyz (1)` exists, until we find a program `xyz (NN)` that doesn't exist yet.\n # It'd be ideal to search by username & program name, but since DynamoDB doesn't allow searching for two indexes at the same time, this would require to create a special index to that effect, which is cumbersome.\n # For now, we bring all existing programs for the user and then search within them for repeated names.\n existing = db_get_many ('programs', {'username': user ['username']}, True)\n name_counter = 0\n for program in existing:\n if re.match ('^' + re.escape (name) + '( \\(\\d+\\))*', program ['name']):\n name_counter = name_counter + 1\n if name_counter:\n name = name + ' (' + str (name_counter) + ')'\n\n stored_program = {\n 'id': uuid.uuid4().hex,\n 'session': session_id(),\n 'date': timems (),\n 'lang': requested_lang(),\n 'version': version(),\n 'level': body ['level'],\n 'code': body ['code'],\n 'name': name,\n 'server_error': error,\n 'username': user ['username']\n }\n\n if 'adventure_name' in body:\n stored_program ['adventure_name'] = body ['adventure_name']\n\n db_create('programs', stored_program)\n\n program_count = 0\n if 'program_count' in user:\n program_count = user ['program_count']\n db_update('users', {'username': user ['username'], 'program_count': program_count + 1})\n\n return jsonify({'name': name})\n\[email protected]('/translate/<source>/<target>')\ndef translate_fromto(source, target):\n # FIXME: right now loading source file on demand. We might need to cache this...\n source_adventures = load_yaml(f'coursedata/adventures/{source}.yaml')\n source_levels = load_yaml(f'coursedata/level-defaults/{source}.yaml')\n source_texts = load_yaml(f'coursedata/texts/{source}.yaml')\n\n target_adventures = load_yaml(f'coursedata/adventures/{target}.yaml')\n target_levels = load_yaml(f'coursedata/level-defaults/{target}.yaml')\n target_texts = load_yaml(f'coursedata/texts/{target}.yaml')\n\n files = []\n\n files.append(translating.TranslatableFile(\n 'Levels',\n f'level-defaults/{target}.yaml',\n translating.struct_to_sections(source_levels, target_levels)))\n\n files.append(translating.TranslatableFile(\n 'Messages',\n f'texts/{target}.yaml',\n translating.struct_to_sections(source_texts, target_texts)))\n\n files.append(translating.TranslatableFile(\n 'Adventures',\n f'adventures/{target}.yaml',\n translating.struct_to_sections(source_adventures, target_adventures)))\n\n return render_template('translate-fromto.html',\n source_lang=source,\n target_lang=target,\n files=files)\n\[email protected]('/update_yaml', methods=['POST'])\ndef update_yaml():\n filename = path.join('coursedata', request.form['file'])\n # The file MUST point to something inside our 'coursedata' directory\n # (no exploiting bullshit here)\n filepath = path.abspath(filename)\n expected_path = path.abspath('coursedata')\n if not filepath.startswith(expected_path):\n raise RuntimeError('Are you trying to trick me?')\n\n data = load_yaml_rt(filepath)\n for key, value in request.form.items():\n if key.startswith('c:'):\n translating.apply_form_change(data, key[2:], translating.normalize_newlines(value))\n\n data = translating.normalize_yaml_blocks(data)\n\n return Response(dump_yaml_rt(data),\n mimetype='application/x-yaml',\n headers={'Content-disposition': 'attachment; filename=' + request.form['file'].replace('/', '-')})\n\n\n# *** AUTH ***\n\nimport auth\nauth.routes (app, 
requested_lang)\n\n# *** START SERVER ***\n\nif __name__ == '__main__':\n # Start the server on a developer machine. Flask is initialized in DEBUG mode, so it\n # hot-reloads files. We also flip our own internal \"debug mode\" flag to True, so our\n # own file loading routines also hot-reload.\n utils.set_debug_mode(True)\n\n # Threaded option enables multiple instances for multiple user access support\n app.run(threaded=True, debug=True, port=config ['port'], host=\"0.0.0.0\")\n\n # See `Procfile` for how the server is started on Heroku.\n",
"path": "app.py"
}
] | diff --git a/app.py b/app.py
index 723ad80d692..b14a2004533 100644
--- a/app.py
+++ b/app.py
@@ -553,7 +553,7 @@ def error():
def internal_error(exception):
import traceback
print(traceback.format_exc())
- return "<h1>500 Internal Server Error</h1>"
+ return "<h1>500 Internal Server Error</h1>", 500
@app.route('/index.html')
@app.route('/')
|
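The one-line change in the diff above addresses a common Flask gotcha: when a handler returns only a body, Flask serves it with HTTP 200 by default, so an error handler has to return the status code explicitly. Below is a minimal standalone sketch of the behavior, assuming a bare Flask app rather than Hedy's actual wiring:

```python
from flask import Flask

app = Flask(__name__)

@app.errorhandler(500)
def internal_error(exception):
    # Returning only the string would be served with status 200 by
    # default, hiding the failure from browsers and monitoring; the
    # explicit second tuple element sets the real status code.
    return "<h1>500 Internal Server Error</h1>", 500
```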
wagtail__wagtail-11660 | Wagtail Documentation favicon icon is missing (Not Found)
### Pertinent section of the Wagtail docs
https://docs.wagtail.org/en/stable/getting_started/index.html
https://docs.wagtail.org/en/stable/getting_started/tutorial.html
https://docs.wagtail.org/en/stable/reference/index.html
https://docs.wagtail.org/en/stable/reference/pages/index.html
The issue is present on every page of the documentation.
### Details
Wagtail has a nice favicon matching the logo, which is displayed fine on this page of the documentation: https://docs.wagtail.org/en/stable/
But on all the other pages the favicon is missing and does not show in the tab of Chrome or any other browser. When I checked the page source I found that favicon.ico is linked via `<link rel="shortcut icon" href="../../_static/favicon.ico" />`, which resolves to https://docs.wagtail.org/en/_static/favicon.ico and returns Not Found!
When I checked other assets, for example the CSS or the logo image, I found they are sourced like `src="../_static/img/wagtail-logo-new.svg"`, which resolves to https://docs.wagtail.org/en/stable/_static/img/wagtail-logo-new.svg and is correct.
The difference between the favicon returning 404 and the logo being available is that the favicon source uses `../../_static`, with an extra `../` that needs to be removed.
<img src="https://img001.prntscr.com/file/img001/zEYpfzNSQHqssOSc2_naxg.png" width="500">
### Working on this
It's a very minor fix and I already described the issue above. I could fix it but I am not sure exactly where in the documentation this favicon is coming from.
Anyone can contribute to this. View our [contributing guidelines](https://docs.wagtail.org/en/latest/contributing/index.html), then add a comment to the issue once you’re ready to start.
| [
{
"content": "#!/usr/bin/env python\n\nfrom wagtail import __version__\nfrom wagtail.utils.setup import assets, check_bdist_egg, sdist\n\ntry:\n from setuptools import find_packages, setup\nexcept ImportError:\n from distutils.core import setup\n\n\n# Hack to prevent \"TypeError: 'NoneType' object is not callable\" error\n# in multiprocessing/util.py _exit_function when setup.py exits\n# (see http://www.eby-sarna.com/pipermail/peak/2010-May/003357.html)\ntry:\n import multiprocessing # noqa: F401\nexcept ImportError:\n pass\n\n\ninstall_requires = [\n \"Django>=4.2,<6.0\",\n \"django-modelcluster>=6.2.1,<7.0\",\n \"django-permissionedforms>=0.1,<1.0\",\n \"django-taggit>=4.0,<5.1\",\n \"django-treebeard>=4.5.1,<5.0\",\n \"djangorestframework>=3.11.1,<4.0\",\n \"django-filter>=23.3,<24\",\n \"draftjs_exporter>=2.1.5,<6.0\",\n \"Pillow>=9.1.0,<11.0.0\",\n \"beautifulsoup4>=4.8,<4.13\",\n \"Willow[heif]>=1.8.0,<2\",\n \"requests>=2.11.1,<3.0\",\n \"l18n>=2018.5\",\n \"openpyxl>=3.0.10,<4.0\",\n \"anyascii>=0.1.5\",\n \"telepath>=0.3.1,<1\",\n \"laces>=0.1,<0.2\",\n]\n\n# Testing dependencies\ntesting_extras = [\n # Required for running the tests\n \"python-dateutil>=2.7\",\n \"pytz>=2014.7\",\n \"Jinja2>=3.0,<3.2\",\n \"boto3>=1.28,<2\",\n \"freezegun>=0.3.8\",\n \"azure-mgmt-cdn>=12.0,<13.0\",\n \"azure-mgmt-frontdoor>=1.0,<1.1\",\n \"django-pattern-library>=0.7\",\n # For coverage and PEP8 linting\n \"coverage>=3.7.0\",\n \"doc8==0.8.1\",\n \"ruff==0.1.5\",\n # For enforcing string formatting mechanism in source files\n \"semgrep==1.40.0\",\n # For templates linting\n \"curlylint==0.13.1\",\n # For template indenting\n \"djhtml==3.0.6\",\n # For validating string formats in .po translation files\n \"polib>=1.1,<2.0\",\n # For wagtail.test.utils.wagtail_factories (used for streamfield migration toolkit)\n \"factory-boy>=3.2\",\n # For running tests in parallel\n \"tblib>=2.0,<3.0\",\n]\n\n# Documentation dependencies\ndocumentation_extras = [\n \"pyenchant>=3.1.1,<4\",\n \"sphinxcontrib-spelling>=7,<8\",\n \"Sphinx>=1.5.2\",\n \"sphinx-autobuild>=0.6.0\",\n \"sphinx-wagtail-theme==6.2.0\",\n \"myst_parser==2.0.0\",\n \"sphinx_copybutton>=0.5,<1.0\",\n]\n\nsetup(\n name=\"wagtail\",\n version=__version__,\n description=\"A Django content management system.\",\n author=\"Wagtail core team + contributors\",\n author_email=\"[email protected]\", # For support queries, please see https://docs.wagtail.org/en/stable/support.html\n url=\"https://wagtail.org/\",\n project_urls={\n \"Changelog\": \"https://github.com/wagtail/wagtail/blob/main/CHANGELOG.txt\",\n \"Documentation\": \"https://docs.wagtail.org\",\n \"Source\": \"https://github.com/wagtail/wagtail\",\n \"Tracker\": \"https://github.com/wagtail/wagtail/issues\",\n },\n packages=find_packages(),\n include_package_data=True,\n license=\"BSD\",\n long_description=\"Wagtail is an open source content management \\\nsystem built on Django, with a strong community and commercial support. 
\\\nIt’s focused on user experience, and offers precise control for \\\ndesigners and developers.\\n\\n\\\nFor more details, see https://wagtail.org, https://docs.wagtail.org and \\\nhttps://github.com/wagtail/wagtail/.\",\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Web Environment\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: BSD License\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: 3.11\",\n \"Programming Language :: Python :: 3.12\",\n \"Framework :: Django\",\n \"Framework :: Django :: 4.2\",\n \"Framework :: Django :: 5.0\",\n \"Framework :: Wagtail\",\n \"Topic :: Internet :: WWW/HTTP :: Site Management\",\n ],\n python_requires=\">=3.8\",\n install_requires=install_requires,\n extras_require={\"testing\": testing_extras, \"docs\": documentation_extras},\n entry_points=\"\"\"\n [console_scripts]\n wagtail=wagtail.bin.wagtail:main\n \"\"\",\n zip_safe=False,\n cmdclass={\n \"sdist\": sdist,\n \"bdist_egg\": check_bdist_egg,\n \"assets\": assets,\n },\n)\n",
"path": "setup.py"
}
] | [
{
"content": "#!/usr/bin/env python\n\nfrom wagtail import __version__\nfrom wagtail.utils.setup import assets, check_bdist_egg, sdist\n\ntry:\n from setuptools import find_packages, setup\nexcept ImportError:\n from distutils.core import setup\n\n\n# Hack to prevent \"TypeError: 'NoneType' object is not callable\" error\n# in multiprocessing/util.py _exit_function when setup.py exits\n# (see http://www.eby-sarna.com/pipermail/peak/2010-May/003357.html)\ntry:\n import multiprocessing # noqa: F401\nexcept ImportError:\n pass\n\n\ninstall_requires = [\n \"Django>=4.2,<6.0\",\n \"django-modelcluster>=6.2.1,<7.0\",\n \"django-permissionedforms>=0.1,<1.0\",\n \"django-taggit>=4.0,<5.1\",\n \"django-treebeard>=4.5.1,<5.0\",\n \"djangorestframework>=3.11.1,<4.0\",\n \"django-filter>=23.3,<24\",\n \"draftjs_exporter>=2.1.5,<6.0\",\n \"Pillow>=9.1.0,<11.0.0\",\n \"beautifulsoup4>=4.8,<4.13\",\n \"Willow[heif]>=1.8.0,<2\",\n \"requests>=2.11.1,<3.0\",\n \"l18n>=2018.5\",\n \"openpyxl>=3.0.10,<4.0\",\n \"anyascii>=0.1.5\",\n \"telepath>=0.3.1,<1\",\n \"laces>=0.1,<0.2\",\n]\n\n# Testing dependencies\ntesting_extras = [\n # Required for running the tests\n \"python-dateutil>=2.7\",\n \"pytz>=2014.7\",\n \"Jinja2>=3.0,<3.2\",\n \"boto3>=1.28,<2\",\n \"freezegun>=0.3.8\",\n \"azure-mgmt-cdn>=12.0,<13.0\",\n \"azure-mgmt-frontdoor>=1.0,<1.1\",\n \"django-pattern-library>=0.7\",\n # For coverage and PEP8 linting\n \"coverage>=3.7.0\",\n \"doc8==0.8.1\",\n \"ruff==0.1.5\",\n # For enforcing string formatting mechanism in source files\n \"semgrep==1.40.0\",\n # For templates linting\n \"curlylint==0.13.1\",\n # For template indenting\n \"djhtml==3.0.6\",\n # For validating string formats in .po translation files\n \"polib>=1.1,<2.0\",\n # For wagtail.test.utils.wagtail_factories (used for streamfield migration toolkit)\n \"factory-boy>=3.2\",\n # For running tests in parallel\n \"tblib>=2.0,<3.0\",\n]\n\n# Documentation dependencies\ndocumentation_extras = [\n \"pyenchant>=3.1.1,<4\",\n \"sphinxcontrib-spelling>=7,<8\",\n \"Sphinx>=1.5.2\",\n \"sphinx-autobuild>=0.6.0\",\n \"sphinx-wagtail-theme==6.3.0\",\n \"myst_parser==2.0.0\",\n \"sphinx_copybutton>=0.5,<1.0\",\n]\n\nsetup(\n name=\"wagtail\",\n version=__version__,\n description=\"A Django content management system.\",\n author=\"Wagtail core team + contributors\",\n author_email=\"[email protected]\", # For support queries, please see https://docs.wagtail.org/en/stable/support.html\n url=\"https://wagtail.org/\",\n project_urls={\n \"Changelog\": \"https://github.com/wagtail/wagtail/blob/main/CHANGELOG.txt\",\n \"Documentation\": \"https://docs.wagtail.org\",\n \"Source\": \"https://github.com/wagtail/wagtail\",\n \"Tracker\": \"https://github.com/wagtail/wagtail/issues\",\n },\n packages=find_packages(),\n include_package_data=True,\n license=\"BSD\",\n long_description=\"Wagtail is an open source content management \\\nsystem built on Django, with a strong community and commercial support. 
\\\nIt’s focused on user experience, and offers precise control for \\\ndesigners and developers.\\n\\n\\\nFor more details, see https://wagtail.org, https://docs.wagtail.org and \\\nhttps://github.com/wagtail/wagtail/.\",\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Web Environment\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: BSD License\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: 3.11\",\n \"Programming Language :: Python :: 3.12\",\n \"Framework :: Django\",\n \"Framework :: Django :: 4.2\",\n \"Framework :: Django :: 5.0\",\n \"Framework :: Wagtail\",\n \"Topic :: Internet :: WWW/HTTP :: Site Management\",\n ],\n python_requires=\">=3.8\",\n install_requires=install_requires,\n extras_require={\"testing\": testing_extras, \"docs\": documentation_extras},\n entry_points=\"\"\"\n [console_scripts]\n wagtail=wagtail.bin.wagtail:main\n \"\"\",\n zip_safe=False,\n cmdclass={\n \"sdist\": sdist,\n \"bdist_egg\": check_bdist_egg,\n \"assets\": assets,\n },\n)\n",
"path": "setup.py"
}
] | diff --git a/CHANGELOG.txt b/CHANGELOG.txt
index bff69275dbb4..31f560206117 100644
--- a/CHANGELOG.txt
+++ b/CHANGELOG.txt
@@ -19,12 +19,14 @@ Changelog
* Fix: Correctly handle `date` objects on `human_readable_date` template tag (Jhonatan Lopes)
* Fix: Ensure re-ordering buttons work correctly when using a nested InlinePanel (Adrien Hamraoui)
* Fix: Consistently remove model's `verbose_name` in group edit view when listing custom permissions (Sage Abdullah, Neeraj Yetheendran, Omkar Jadhav)
+ * Fix: Resolve issue local development of docs when running `make livehtml` (Sage Abdullah)
* Docs: Add contributing development documentation on how to work with a fork of Wagtail (Nix Asteri, Dan Braghis)
* Docs: Make sure the settings panel is listed in tabbed interface examples (Tibor Leupold)
* Docs: Update content and page names to their US spelling instead of UK spelling (Victoria Poromon)
* Docs: Update broken and incorrect links throughout the documentation (EK303)
* Docs: Fix formatting of `--purge-only` in `wagtail_update_image_renditions` management command section (Pranith Beeram)
* Docs: Update template components documentation to better explain the usage of the Laces library (Tibor Leupold)
+ * Docs: Update Sphinx theme to `6.3.0` with a fix for the missing favicon (Sage Abdullah)
* Maintenance: Move RichText HTML whitelist parser to use the faster, built in `html.parser` (Jake Howard)
* Maintenance: Remove duplicate 'path' in default_exclude_fields_in_copy (Ramchandra Shahi Thakuri)
* Maintenance: Update unit tests to always use the faster, built in `html.parser` & remove `html5lib` dependency (Jake Howard)
diff --git a/docs/Makefile b/docs/Makefile
index ffe6fa0ca9cd..d5d0de4d013a 100644
--- a/docs/Makefile
+++ b/docs/Makefile
@@ -2,7 +2,7 @@
#
# You can set these variables from the command line.
-SPHINXOPTS = -W --keep-going -n
+SPHINXOPTS = -W --keep-going -n -jauto
SPHINXBUILD = sphinx-build
PAPER =
BUILDDIR = _build
@@ -184,7 +184,7 @@ pseudoxml:
@echo
@echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml."
+# Override SPHINXOPTS as `sphinx-autobuild` does not have `--keep-going` option
+livehtml: SPHINXOPTS = -W -n -jauto
livehtml:
sphinx-autobuild --port 4000 --host 0.0.0.0 -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
-
-
diff --git a/docs/releases/6.1.md b/docs/releases/6.1.md
index 7a41f48d7479..f40e7c833564 100644
--- a/docs/releases/6.1.md
+++ b/docs/releases/6.1.md
@@ -32,6 +32,7 @@ depth: 1
* Correctly handle `date` objects on `human_readable_date` template tag (Jhonatan Lopes)
* Ensure re-ordering buttons work correctly when using a nested InlinePanel (Adrien Hamraoui)
* Consistently remove model's `verbose_name` in group edit view when listing custom permissions (Sage Abdullah, Neeraj Yetheendran, Omkar Jadhav)
+ * Resolve issue local development of docs when running `make livehtml` (Sage Abdullah)
### Documentation
@@ -42,6 +43,7 @@ depth: 1
* Update broken and incorrect links throughout the documentation (EK303)
* Fix formatting of `--purge-only` in [`wagtail_update_image_renditions`](wagtail_update_image_renditions) management command section (Pranith Beeram)
* Update [template components](creating_template_components) documentation to better explain the usage of the Laces library (Tibor Leupold)
+ * Update Sphinx theme to `6.3.0` with a fix for the missing favicon (Sage Abdullah)
### Maintenance
diff --git a/setup.py b/setup.py
index 904d2959df60..34b6404027b4 100755
--- a/setup.py
+++ b/setup.py
@@ -73,7 +73,7 @@
"sphinxcontrib-spelling>=7,<8",
"Sphinx>=1.5.2",
"sphinx-autobuild>=0.6.0",
- "sphinx-wagtail-theme==6.2.0",
+ "sphinx-wagtail-theme==6.3.0",
"myst_parser==2.0.0",
"sphinx_copybutton>=0.5,<1.0",
]
|
rlworkgroup__garage-971 | pytest flag --strict-markers requires version 4.5.0
The pytest flag `--strict-markers` in https://github.com/rlworkgroup/garage/blob/master/setup.cfg#L79 requires pytest >= 4.5.0.
See https://docs.pytest.org/en/latest/changelog.html#pytest-4-5-0-2019-05-11
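A minimal sketch of the fix in `setup.py`'s dev extras (surrounding entries elided):

```python
# Sketch of the setup.py dev-extras change: --strict-markers was added in
# pytest 4.5.0, so the lower bound must be raised accordingly.
EXTRAS = {}  # as in garage's setup.py
EXTRAS['dev'] = [
    # ... other dev dependencies elided ...
    'pytest>=4.5.0',  # required for the --strict-markers flag in setup.cfg
]
```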
| [
{
"content": "\"\"\"setuptools based setup module.\"\"\"\nfrom setuptools import find_packages\nfrom setuptools import setup\n\nTF_VERSION = '<1.16,>=1.15.0'\nGYM_VERSION = '==0.12.4'\n\n# Required dependencies\nREQUIRED = [\n # Please keep alphabetized\n 'akro==0.0.6',\n 'cached_property',\n 'click',\n 'cloudpickle',\n 'cma==2.7.0',\n 'dowel==0.0.2',\n 'gym[atari,box2d,classic_control]' + GYM_VERSION,\n 'joblib<0.13,>=0.12',\n 'matplotlib',\n 'numpy>=1.14.5',\n 'psutil',\n # Pyglet 1.4.0 introduces some api change which breaks some\n # gym environments\n # See: https://github.com/openai/gym/issues/1588\n 'pyglet<1.4.0,>=1.3.0',\n 'pyprind',\n 'python-dateutil',\n 'torch==1.3.0',\n 'ray',\n 'scikit-image',\n 'scipy',\n 'tensorflow' + TF_VERSION,\n 'tensorflow-probability',\n 'torchvision==0.4.1'\n]\n\n# Dependencies for optional features\nEXTRAS = {}\n\nEXTRAS['mujoco'] = [\n 'mujoco-py<2.1,>=2.0',\n 'gym[all]' + GYM_VERSION,\n]\n\nEXTRAS['dm_control'] = [\n # dm_control throws an error during install about not being able to\n # find a build dependency (absl-py). Later pip executes the `install`\n # command again and the install succeeds because absl-py has been\n # installed. This is stupid, but harmless.\n 'dm_control @ https://api.github.com/repos/deepmind/dm_control/tarball/7a36377879c57777e5d5b4da5aae2cd2a29b607a', # pylint: disable=line-too-long; # noqa: E501\n]\n\nEXTRAS['all'] = list(set(sum(EXTRAS.values(), [])))\n\n# dependencies for using gpu, not included in 'all'\nEXTRAS['gpu'] = ['tensorflow-gpu' + TF_VERSION]\n\n# Development dependencies (*not* included in 'all')\nEXTRAS['dev'] = [\n # Please keep alphabetized\n 'baselines @ https://api.github.com/repos/openai/baselines/tarball/f2729693253c0ef4d4086231d36e0a4307ec1cb3', # pylint: disable=line-too-long; # noqa: E501\n 'flake8',\n 'flake8-docstrings>=1.5.0',\n 'flake8-import-order',\n 'gtimer',\n 'pandas',\n 'pep8-naming==0.7.0',\n 'pre-commit',\n 'pycodestyle>=2.5.0',\n 'pydocstyle>=4.0.0',\n 'pylint>=2.4.3',\n 'pytest>=3.6', # Required for pytest-cov on Python 3.6\n 'pytest-cov',\n 'pytest-xdist',\n 'recommonmark',\n 'rlkit @ git+https://github.com/vitchyr/rlkit/@1d469a509b797ca04a39b8734c1816ca7d108fc8', # pylint: disable=line-too-long; # noqa: E501\n 'seaborn',\n 'sphinx',\n 'sphinx_rtd_theme',\n 'yapf==0.28.0',\n]\n\nwith open('README.md') as f:\n README = f.read()\n\n# Get the package version dynamically\nwith open('VERSION') as v:\n VERSION = v.read().strip()\n\nsetup(\n name='garage',\n version=VERSION,\n author='Reinforcement Learning Working Group',\n description='A toolkit for reproducible reinforcement learning research',\n url='https://github.com/rlworkgroup/garage',\n packages=find_packages(where='src'),\n package_dir={'': 'src'},\n scripts=['scripts/garage'],\n python_requires='>=3.5',\n install_requires=REQUIRED,\n extras_require=EXTRAS,\n license='MIT',\n long_description=README,\n long_description_content_type='text/markdown',\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Education',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3 :: Only',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence',\n 'Topic :: Scientific/Engineering :: Mathematics',\n 'Topic :: Software Development :: Libraries',\n ],\n)\n",
"path": "setup.py"
}
] | [
{
"content": "\"\"\"setuptools based setup module.\"\"\"\nfrom setuptools import find_packages\nfrom setuptools import setup\n\nTF_VERSION = '<1.16,>=1.15.0'\nGYM_VERSION = '==0.12.4'\n\n# Required dependencies\nREQUIRED = [\n # Please keep alphabetized\n 'akro==0.0.6',\n 'cached_property',\n 'click',\n 'cloudpickle',\n 'cma==2.7.0',\n 'dowel==0.0.2',\n 'gym[atari,box2d,classic_control]' + GYM_VERSION,\n 'joblib<0.13,>=0.12',\n 'matplotlib',\n 'numpy>=1.14.5',\n 'psutil',\n # Pyglet 1.4.0 introduces some api change which breaks some\n # gym environments\n # See: https://github.com/openai/gym/issues/1588\n 'pyglet<1.4.0,>=1.3.0',\n 'pyprind',\n 'python-dateutil',\n 'torch==1.3.0',\n 'ray',\n 'scikit-image',\n 'scipy',\n 'tensorflow' + TF_VERSION,\n 'tensorflow-probability',\n 'torchvision==0.4.1'\n]\n\n# Dependencies for optional features\nEXTRAS = {}\n\nEXTRAS['mujoco'] = [\n 'mujoco-py<2.1,>=2.0',\n 'gym[all]' + GYM_VERSION,\n]\n\nEXTRAS['dm_control'] = [\n # dm_control throws an error during install about not being able to\n # find a build dependency (absl-py). Later pip executes the `install`\n # command again and the install succeeds because absl-py has been\n # installed. This is stupid, but harmless.\n 'dm_control @ https://api.github.com/repos/deepmind/dm_control/tarball/7a36377879c57777e5d5b4da5aae2cd2a29b607a', # pylint: disable=line-too-long; # noqa: E501\n]\n\nEXTRAS['all'] = list(set(sum(EXTRAS.values(), [])))\n\n# dependencies for using gpu, not included in 'all'\nEXTRAS['gpu'] = ['tensorflow-gpu' + TF_VERSION]\n\n# Development dependencies (*not* included in 'all')\nEXTRAS['dev'] = [\n # Please keep alphabetized\n 'baselines @ https://api.github.com/repos/openai/baselines/tarball/f2729693253c0ef4d4086231d36e0a4307ec1cb3', # pylint: disable=line-too-long; # noqa: E501\n 'flake8',\n 'flake8-docstrings>=1.5.0',\n 'flake8-import-order',\n 'gtimer',\n 'pandas',\n 'pep8-naming==0.7.0',\n 'pre-commit',\n 'pycodestyle>=2.5.0',\n 'pydocstyle>=4.0.0',\n 'pylint>=2.4.3',\n 'pytest>=4.5.0', # Required for strict-markers\n 'pytest-cov',\n 'pytest-xdist',\n 'recommonmark',\n 'rlkit @ git+https://github.com/vitchyr/rlkit/@1d469a509b797ca04a39b8734c1816ca7d108fc8', # pylint: disable=line-too-long; # noqa: E501\n 'seaborn',\n 'sphinx',\n 'sphinx_rtd_theme',\n 'yapf==0.28.0',\n]\n\nwith open('README.md') as f:\n README = f.read()\n\n# Get the package version dynamically\nwith open('VERSION') as v:\n VERSION = v.read().strip()\n\nsetup(\n name='garage',\n version=VERSION,\n author='Reinforcement Learning Working Group',\n description='A toolkit for reproducible reinforcement learning research',\n url='https://github.com/rlworkgroup/garage',\n packages=find_packages(where='src'),\n package_dir={'': 'src'},\n scripts=['scripts/garage'],\n python_requires='>=3.5',\n install_requires=REQUIRED,\n extras_require=EXTRAS,\n license='MIT',\n long_description=README,\n long_description_content_type='text/markdown',\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Education',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3 :: Only',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence',\n 'Topic :: Scientific/Engineering :: Mathematics',\n 'Topic :: Software Development :: Libraries',\n ],\n)\n",
"path": "setup.py"
}
] | diff --git a/setup.py b/setup.py
index 204c7a25bf..42cde19847 100644
--- a/setup.py
+++ b/setup.py
@@ -69,7 +69,7 @@
'pycodestyle>=2.5.0',
'pydocstyle>=4.0.0',
'pylint>=2.4.3',
- 'pytest>=3.6', # Required for pytest-cov on Python 3.6
+ 'pytest>=4.5.0', # Required for strict-markers
'pytest-cov',
'pytest-xdist',
'recommonmark',
|
ethereum__consensus-specs-2750 | Simplify sync protocol and update to calculate optimistic heads
1. Simplify `valid_updates` to `best_valid_update` so the `LightClientStore` only needs to store O(1) data
2. Track an optimistic head by looking for the highest-slot header which passes a safety threshold (see the sketch below)
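A minimal sketch of the two points, using hypothetical, simplified types rather than the spec's actual containers:

```python
# Hedged sketch with hypothetical, simplified types (not the spec's actual
# containers): the store keeps a single best update instead of a list, and
# the optimistic head advances to the highest-slot header whose
# sync-committee participation clears a safety threshold.
from dataclasses import dataclass
from typing import List, Optional

@dataclass
class Header:
    slot: int

@dataclass
class Update:
    attested_header: Header
    sync_committee_bits: List[bool]

@dataclass
class LightClientStore:
    best_valid_update: Optional[Update]  # O(1) data instead of a valid_updates list
    optimistic_header: Header

def process_update(store: LightClientStore, update: Update, safety_threshold: int) -> None:
    participation = sum(update.sync_committee_bits)
    # Keep only the single best update seen so far (here: most participation).
    if (store.best_valid_update is None
            or participation > sum(store.best_valid_update.sync_committee_bits)):
        store.best_valid_update = update
    # Optimistic head: highest-slot header that clears the safety threshold.
    if (participation >= safety_threshold
            and update.attested_header.slot > store.optimistic_header.slot):
        store.optimistic_header = update.attested_header
```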
| [
{
"content": "from setuptools import setup, find_packages, Command\nfrom setuptools.command.build_py import build_py\nfrom distutils import dir_util\nfrom distutils.util import convert_path\nfrom pathlib import Path\nimport os\nimport re\nimport string\nimport textwrap\nfrom typing import Dict, NamedTuple, List, Sequence, Optional, TypeVar\nfrom abc import ABC, abstractmethod\nimport ast\nimport subprocess\nimport sys\n\n# NOTE: have to programmatically include third-party dependencies in `setup.py`.\ndef installPackage(package: str):\n subprocess.check_call([sys.executable, '-m', 'pip', 'install', package])\n\nRUAMEL_YAML_VERSION = \"ruamel.yaml==0.16.5\"\ntry:\n import ruamel.yaml\nexcept ImportError:\n installPackage(RUAMEL_YAML_VERSION)\n\nfrom ruamel.yaml import YAML\n\nMARKO_VERSION = \"marko==1.0.2\"\ntry:\n import marko\nexcept ImportError:\n installPackage(MARKO_VERSION)\n\nfrom marko.block import Heading, FencedCode, LinkRefDef, BlankLine\nfrom marko.inline import CodeSpan\nfrom marko.ext.gfm import gfm\nfrom marko.ext.gfm.elements import Table\n\n\n# Definitions in context.py\nPHASE0 = 'phase0'\nALTAIR = 'altair'\nMERGE = 'merge'\n\n# The helper functions that are used when defining constants\nCONSTANT_DEP_SUNDRY_CONSTANTS_FUNCTIONS = '''\ndef ceillog2(x: int) -> uint64:\n if x < 1:\n raise ValueError(f\"ceillog2 accepts only positive values, x={x}\")\n return uint64((x - 1).bit_length())\n\n\ndef floorlog2(x: int) -> uint64:\n if x < 1:\n raise ValueError(f\"floorlog2 accepts only positive values, x={x}\")\n return uint64(x.bit_length() - 1)\n'''\n\n\nOPTIMIZED_BLS_AGGREGATE_PUBKEYS = '''\ndef eth_aggregate_pubkeys(pubkeys: Sequence[BLSPubkey]) -> BLSPubkey:\n return bls.AggregatePKs(pubkeys)\n'''\n\n\nclass ProtocolDefinition(NamedTuple):\n # just function definitions currently. May expand with configuration vars in future.\n functions: Dict[str, str]\n\n\nclass VariableDefinition(NamedTuple):\n type_name: Optional[str]\n value: str\n comment: Optional[str] # e.g. \"noqa: E501\"\n\n\nclass SpecObject(NamedTuple):\n functions: Dict[str, str]\n protocols: Dict[str, ProtocolDefinition]\n custom_types: Dict[str, str]\n constant_vars: Dict[str, VariableDefinition]\n preset_vars: Dict[str, VariableDefinition]\n config_vars: Dict[str, VariableDefinition]\n ssz_dep_constants: Dict[str, str] # the constants that depend on ssz_objects\n ssz_objects: Dict[str, str]\n dataclasses: Dict[str, str]\n\n\ndef _get_name_from_heading(heading: Heading) -> Optional[str]:\n last_child = heading.children[-1]\n if isinstance(last_child, CodeSpan):\n return last_child.children\n return None\n\n\ndef _get_source_from_code_block(block: FencedCode) -> str:\n return block.children[0].children.strip()\n\n\ndef _get_function_name_from_source(source: str) -> str:\n fn = ast.parse(source).body[0]\n return fn.name\n\n\ndef _get_self_type_from_source(source: str) -> Optional[str]:\n fn = ast.parse(source).body[0]\n args = fn.args.args\n if len(args) == 0:\n return None\n if args[0].arg != 'self':\n return None\n if args[0].annotation is None:\n return None\n return args[0].annotation.id\n\n\ndef _get_class_info_from_source(source: str) -> (str, Optional[str]):\n class_def = ast.parse(source).body[0]\n base = class_def.bases[0]\n if isinstance(base, ast.Name):\n parent_class = base.id\n else:\n # NOTE: SSZ definition derives from earlier phase...\n # e.g. 
`phase0.SignedBeaconBlock`\n # TODO: check for consistency with other phases\n parent_class = None\n return class_def.name, parent_class\n\n\ndef _is_constant_id(name: str) -> bool:\n if name[0] not in string.ascii_uppercase + '_':\n return False\n return all(map(lambda c: c in string.ascii_uppercase + '_' + string.digits, name[1:]))\n\n\nETH2_SPEC_COMMENT_PREFIX = \"eth2spec:\"\n\n\ndef _get_eth2_spec_comment(child: LinkRefDef) -> Optional[str]:\n _, _, title = child._parse_info\n if not (title[0] == \"(\" and title[len(title)-1] == \")\"):\n return None\n title = title[1:len(title)-1]\n if not title.startswith(ETH2_SPEC_COMMENT_PREFIX):\n return None\n return title[len(ETH2_SPEC_COMMENT_PREFIX):].strip()\n\n\ndef _parse_value(name: str, typed_value: str) -> VariableDefinition:\n comment = None\n if name == \"BLS12_381_Q\":\n comment = \"noqa: E501\"\n\n typed_value = typed_value.strip()\n if '(' not in typed_value:\n return VariableDefinition(type_name=None, value=typed_value, comment=comment)\n i = typed_value.index('(')\n type_name = typed_value[:i]\n\n return VariableDefinition(type_name=type_name, value=typed_value[i+1:-1], comment=comment)\n\n\ndef get_spec(file_name: Path, preset: Dict[str, str], config: Dict[str, str]) -> SpecObject:\n functions: Dict[str, str] = {}\n protocols: Dict[str, ProtocolDefinition] = {}\n constant_vars: Dict[str, VariableDefinition] = {}\n preset_vars: Dict[str, VariableDefinition] = {}\n config_vars: Dict[str, VariableDefinition] = {}\n ssz_dep_constants: Dict[str, str] = {}\n ssz_objects: Dict[str, str] = {}\n dataclasses: Dict[str, str] = {}\n custom_types: Dict[str, str] = {}\n\n with open(file_name) as source_file:\n document = gfm.parse(source_file.read())\n\n current_name = None\n should_skip = False\n for child in document.children:\n if isinstance(child, BlankLine):\n continue\n if should_skip:\n should_skip = False\n continue\n if isinstance(child, Heading):\n current_name = _get_name_from_heading(child)\n elif isinstance(child, FencedCode):\n if child.lang != \"python\":\n continue\n source = _get_source_from_code_block(child)\n if source.startswith(\"def\"):\n current_name = _get_function_name_from_source(source)\n self_type_name = _get_self_type_from_source(source)\n function_def = \"\\n\".join(line.rstrip() for line in source.splitlines())\n if self_type_name is None:\n functions[current_name] = function_def\n else:\n if self_type_name not in protocols:\n protocols[self_type_name] = ProtocolDefinition(functions={})\n protocols[self_type_name].functions[current_name] = function_def\n elif source.startswith(\"@dataclass\"):\n dataclasses[current_name] = \"\\n\".join(line.rstrip() for line in source.splitlines())\n elif source.startswith(\"class\"):\n class_name, parent_class = _get_class_info_from_source(source)\n # check consistency with spec\n assert class_name == current_name\n if parent_class:\n assert parent_class == \"Container\"\n # NOTE: trim whitespace from spec\n ssz_objects[current_name] = \"\\n\".join(line.rstrip() for line in source.splitlines())\n else:\n raise Exception(\"unrecognized python code element: \" + source)\n elif isinstance(child, Table):\n for row in child.children:\n cells = row.children\n if len(cells) >= 2:\n name_cell = cells[0]\n name = name_cell.children[0].children\n\n value_cell = cells[1]\n value = value_cell.children[0].children\n if isinstance(value, list):\n # marko parses `**X**` as a list containing a X\n value = value[0].children\n\n if not _is_constant_id(name):\n # Check for short type 
declarations\n if value.startswith((\"uint\", \"Bytes\", \"ByteList\", \"Union\")):\n custom_types[name] = value\n continue\n\n if value.startswith(\"get_generalized_index\"):\n ssz_dep_constants[name] = value\n continue\n\n value_def = _parse_value(name, value)\n if name in preset:\n preset_vars[name] = VariableDefinition(value_def.type_name, preset[name], value_def.comment)\n elif name in config:\n config_vars[name] = VariableDefinition(value_def.type_name, config[name], value_def.comment)\n else:\n constant_vars[name] = value_def\n\n elif isinstance(child, LinkRefDef):\n comment = _get_eth2_spec_comment(child)\n if comment == \"skip\":\n should_skip = True\n\n return SpecObject(\n functions=functions,\n protocols=protocols,\n custom_types=custom_types,\n constant_vars=constant_vars,\n preset_vars=preset_vars,\n config_vars=config_vars,\n ssz_dep_constants=ssz_dep_constants,\n ssz_objects=ssz_objects,\n dataclasses=dataclasses,\n )\n\n\nclass SpecBuilder(ABC):\n @property\n @abstractmethod\n def fork(self) -> str:\n raise NotImplementedError()\n\n @classmethod\n @abstractmethod\n def imports(cls, preset_name: str) -> str:\n \"\"\"\n Import objects from other libraries.\n \"\"\"\n raise NotImplementedError()\n\n @classmethod\n @abstractmethod\n def preparations(cls) -> str:\n \"\"\"\n Define special types/constants for building pyspec or call functions.\n \"\"\"\n raise NotImplementedError()\n\n @classmethod\n @abstractmethod\n def sundry_functions(cls) -> str:\n \"\"\"\n The functions that are (1) defined abstractly in specs or (2) adjusted for getting better performance.\n \"\"\"\n raise NotImplementedError()\n\n @classmethod\n @abstractmethod\n def hardcoded_ssz_dep_constants(cls) -> Dict[str, str]:\n \"\"\"\n The constants that are required for SSZ objects.\n \"\"\"\n raise NotImplementedError()\n\n @classmethod\n @abstractmethod\n def hardcoded_custom_type_dep_constants(cls) -> Dict[str, str]: # TODO\n \"\"\"\n The constants that are required for custom types.\n \"\"\"\n raise NotImplementedError()\n\n @classmethod\n @abstractmethod\n def implement_optimizations(cls, functions: Dict[str, str]) -> Dict[str, str]:\n raise NotImplementedError()\n\n @classmethod\n @abstractmethod\n def build_spec(cls, preset_name: str,\n source_files: List[Path], preset_files: Sequence[Path], config_file: Path) -> str:\n raise NotImplementedError()\n\n\n#\n# Phase0SpecBuilder\n#\nclass Phase0SpecBuilder(SpecBuilder):\n fork: str = PHASE0\n\n @classmethod\n def imports(cls, preset_name: str) -> str:\n return '''from lru import LRU\nfrom dataclasses import (\n dataclass,\n field,\n)\nfrom typing import (\n Any, Callable, Dict, Set, Sequence, Tuple, Optional, TypeVar, NamedTuple\n)\n\nfrom eth2spec.utils.ssz.ssz_impl import hash_tree_root, copy, uint_to_bytes\nfrom eth2spec.utils.ssz.ssz_typing import (\n View, boolean, Container, List, Vector, uint8, uint32, uint64,\n Bytes1, Bytes4, Bytes32, Bytes48, Bytes96, Bitlist)\nfrom eth2spec.utils.ssz.ssz_typing import Bitvector # noqa: F401\nfrom eth2spec.utils import bls\nfrom eth2spec.utils.hash_function import hash\n'''\n\n @classmethod\n def preparations(cls) -> str:\n return '''\nSSZObject = TypeVar('SSZObject', bound=View)\n'''\n\n @classmethod\n def sundry_functions(cls) -> str:\n return '''\ndef get_eth1_data(block: Eth1Block) -> Eth1Data:\n \"\"\"\n A stub function return mocking Eth1Data.\n \"\"\"\n return Eth1Data(\n deposit_root=block.deposit_root,\n deposit_count=block.deposit_count,\n block_hash=hash_tree_root(block))\n\n\ndef cache_this(key_fn, 
value_fn, lru_size): # type: ignore\n cache_dict = LRU(size=lru_size)\n\n def wrapper(*args, **kw): # type: ignore\n key = key_fn(*args, **kw)\n nonlocal cache_dict\n if key not in cache_dict:\n cache_dict[key] = value_fn(*args, **kw)\n return cache_dict[key]\n return wrapper\n\n\n_compute_shuffled_index = compute_shuffled_index\ncompute_shuffled_index = cache_this(\n lambda index, index_count, seed: (index, index_count, seed),\n _compute_shuffled_index, lru_size=SLOTS_PER_EPOCH * 3)\n\n_get_total_active_balance = get_total_active_balance\nget_total_active_balance = cache_this(\n lambda state: (state.validators.hash_tree_root(), compute_epoch_at_slot(state.slot)),\n _get_total_active_balance, lru_size=10)\n\n_get_base_reward = get_base_reward\nget_base_reward = cache_this(\n lambda state, index: (state.validators.hash_tree_root(), state.slot, index),\n _get_base_reward, lru_size=2048)\n\n_get_committee_count_per_slot = get_committee_count_per_slot\nget_committee_count_per_slot = cache_this(\n lambda state, epoch: (state.validators.hash_tree_root(), epoch),\n _get_committee_count_per_slot, lru_size=SLOTS_PER_EPOCH * 3)\n\n_get_active_validator_indices = get_active_validator_indices\nget_active_validator_indices = cache_this(\n lambda state, epoch: (state.validators.hash_tree_root(), epoch),\n _get_active_validator_indices, lru_size=3)\n\n_get_beacon_committee = get_beacon_committee\nget_beacon_committee = cache_this(\n lambda state, slot, index: (state.validators.hash_tree_root(), state.randao_mixes.hash_tree_root(), slot, index),\n _get_beacon_committee, lru_size=SLOTS_PER_EPOCH * MAX_COMMITTEES_PER_SLOT * 3)\n\n_get_matching_target_attestations = get_matching_target_attestations\nget_matching_target_attestations = cache_this(\n lambda state, epoch: (state.hash_tree_root(), epoch),\n _get_matching_target_attestations, lru_size=10)\n\n_get_matching_head_attestations = get_matching_head_attestations\nget_matching_head_attestations = cache_this(\n lambda state, epoch: (state.hash_tree_root(), epoch),\n _get_matching_head_attestations, lru_size=10)\n\n_get_attesting_indices = get_attesting_indices\nget_attesting_indices = cache_this(\n lambda state, data, bits: (\n state.randao_mixes.hash_tree_root(),\n state.validators.hash_tree_root(), data.hash_tree_root(), bits.hash_tree_root()\n ),\n _get_attesting_indices, lru_size=SLOTS_PER_EPOCH * MAX_COMMITTEES_PER_SLOT * 3)'''\n\n @classmethod\n def hardcoded_ssz_dep_constants(cls) -> Dict[str, str]:\n return {}\n\n @classmethod\n def hardcoded_custom_type_dep_constants(cls) -> Dict[str, str]:\n return {}\n\n @classmethod\n def implement_optimizations(cls, functions: Dict[str, str]) -> Dict[str, str]:\n return functions\n\n @classmethod\n def build_spec(cls, preset_name: str,\n source_files: Sequence[Path], preset_files: Sequence[Path], config_file: Path) -> str:\n return _build_spec(preset_name, cls.fork, source_files, preset_files, config_file)\n\n\n#\n# AltairSpecBuilder\n#\nclass AltairSpecBuilder(Phase0SpecBuilder):\n fork: str = ALTAIR\n\n @classmethod\n def imports(cls, preset_name: str) -> str:\n return super().imports(preset_name) + '\\n' + f'''\nfrom typing import NewType, Union as PyUnion\n\nfrom eth2spec.phase0 import {preset_name} as phase0\nfrom eth2spec.utils.ssz.ssz_typing import Path\n'''\n\n @classmethod\n def preparations(cls):\n return super().preparations() + '\\n' + '''\nSSZVariableName = str\nGeneralizedIndex = NewType('GeneralizedIndex', int)\n'''\n\n @classmethod\n def sundry_functions(cls) -> str:\n return 
super().sundry_functions() + '\\n\\n' + '''\ndef get_generalized_index(ssz_class: Any, *path: Sequence[PyUnion[int, SSZVariableName]]) -> GeneralizedIndex:\n ssz_path = Path(ssz_class)\n for item in path:\n ssz_path = ssz_path / item\n return GeneralizedIndex(ssz_path.gindex())'''\n\n\n @classmethod\n def hardcoded_ssz_dep_constants(cls) -> Dict[str, str]:\n constants = {\n 'FINALIZED_ROOT_INDEX': 'GeneralizedIndex(105)',\n 'NEXT_SYNC_COMMITTEE_INDEX': 'GeneralizedIndex(55)',\n }\n return {**super().hardcoded_ssz_dep_constants(), **constants}\n\n @classmethod\n def implement_optimizations(cls, functions: Dict[str, str]) -> Dict[str, str]:\n if \"eth_aggregate_pubkeys\" in functions:\n functions[\"eth_aggregate_pubkeys\"] = OPTIMIZED_BLS_AGGREGATE_PUBKEYS.strip()\n return super().implement_optimizations(functions)\n\n#\n# MergeSpecBuilder\n#\nclass MergeSpecBuilder(AltairSpecBuilder):\n fork: str = MERGE\n\n @classmethod\n def imports(cls, preset_name: str):\n return super().imports(preset_name) + f'''\nfrom typing import Protocol\nfrom eth2spec.altair import {preset_name} as altair\nfrom eth2spec.utils.ssz.ssz_typing import Bytes8, Bytes20, ByteList, ByteVector, uint256\n'''\n\n @classmethod\n def preparations(cls):\n return super().preparations()\n\n @classmethod\n def sundry_functions(cls) -> str:\n return super().sundry_functions() + '\\n\\n' + \"\"\"\nExecutionState = Any\n\n\ndef get_pow_block(hash: Bytes32) -> Optional[PowBlock]:\n return PowBlock(block_hash=hash, parent_hash=Bytes32(), total_difficulty=uint256(0))\n\n\ndef get_execution_state(execution_state_root: Bytes32) -> ExecutionState:\n pass\n\n\ndef get_pow_chain_head() -> PowBlock:\n pass\n\n\nclass NoopExecutionEngine(ExecutionEngine):\n\n def execute_payload(self: ExecutionEngine, execution_payload: ExecutionPayload) -> bool:\n return True\n\n def notify_forkchoice_updated(self: ExecutionEngine,\n head_block_hash: Hash32,\n finalized_block_hash: Hash32,\n payload_attributes: Optional[PayloadAttributes]) -> Optional[PayloadId]:\n pass\n\n def get_payload(self: ExecutionEngine, payload_id: PayloadId) -> ExecutionPayload:\n raise NotImplementedError(\"no default block production\")\n\n\nEXECUTION_ENGINE = NoopExecutionEngine()\"\"\"\n\n\n @classmethod\n def hardcoded_custom_type_dep_constants(cls) -> str:\n constants = {\n 'MAX_BYTES_PER_TRANSACTION': 'uint64(2**30)',\n }\n return {**super().hardcoded_custom_type_dep_constants(), **constants}\n\n\nspec_builders = {\n builder.fork: builder\n for builder in (Phase0SpecBuilder, AltairSpecBuilder, MergeSpecBuilder)\n}\n\n\ndef is_spec_defined_type(value: str) -> bool:\n return value.startswith('ByteList') or value.startswith('Union')\n\n\ndef objects_to_spec(preset_name: str,\n spec_object: SpecObject,\n builder: SpecBuilder,\n ordered_class_objects: Dict[str, str]) -> str:\n \"\"\"\n Given all the objects that constitute a spec, combine them into a single pyfile.\n \"\"\"\n new_type_definitions = (\n '\\n\\n'.join(\n [\n f\"class {key}({value}):\\n pass\\n\"\n for key, value in spec_object.custom_types.items()\n if not is_spec_defined_type(value)\n ]\n )\n + ('\\n\\n' if len([key for key, value in spec_object.custom_types.items() if is_spec_defined_type(value)]) > 0 else '')\n + '\\n\\n'.join(\n [\n f\"{key} = {value}\\n\"\n for key, value in spec_object.custom_types.items()\n if is_spec_defined_type(value)\n ]\n )\n )\n\n def format_protocol(protocol_name: str, protocol_def: ProtocolDefinition) -> str:\n protocol = f\"class {protocol_name}(Protocol):\"\n for fn_source in 
protocol_def.functions.values():\n fn_source = fn_source.replace(\"self: \"+protocol_name, \"self\")\n protocol += \"\\n\\n\" + textwrap.indent(fn_source, \" \")\n return protocol\n\n protocols_spec = '\\n\\n\\n'.join(format_protocol(k, v) for k, v in spec_object.protocols.items())\n for k in list(spec_object.functions):\n if \"ceillog2\" in k or \"floorlog2\" in k:\n del spec_object.functions[k]\n functions = builder.implement_optimizations(spec_object.functions)\n functions_spec = '\\n\\n\\n'.join(functions.values())\n\n # Access global dict of config vars for runtime configurables\n for name in spec_object.config_vars.keys():\n functions_spec = re.sub(r\"\\b%s\\b\" % name, 'config.' + name, functions_spec)\n\n def format_config_var(name: str, vardef: VariableDefinition) -> str:\n if vardef.type_name is None:\n out = f'{name}={vardef.value},'\n else:\n out = f'{name}={vardef.type_name}({vardef.value}),'\n if vardef.comment is not None:\n out += f' # {vardef.comment}'\n return out\n\n config_spec = 'class Configuration(NamedTuple):\\n'\n config_spec += ' PRESET_BASE: str\\n'\n config_spec += '\\n'.join(f' {k}: {v.type_name if v.type_name is not None else \"int\"}'\n for k, v in spec_object.config_vars.items())\n config_spec += '\\n\\n\\nconfig = Configuration(\\n'\n config_spec += f' PRESET_BASE=\"{preset_name}\",\\n'\n config_spec += '\\n'.join(' ' + format_config_var(k, v) for k, v in spec_object.config_vars.items())\n config_spec += '\\n)\\n'\n\n def format_constant(name: str, vardef: VariableDefinition) -> str:\n if vardef.type_name is None:\n out = f'{name} = {vardef.value}'\n else:\n out = f'{name} = {vardef.type_name}({vardef.value})'\n if vardef.comment is not None:\n out += f' # {vardef.comment}'\n return out\n\n constant_vars_spec = '# Constant vars\\n' + '\\n'.join(format_constant(k, v) for k, v in spec_object.constant_vars.items())\n preset_vars_spec = '# Preset vars\\n' + '\\n'.join(format_constant(k, v) for k, v in spec_object.preset_vars.items())\n ordered_class_objects_spec = '\\n\\n\\n'.join(ordered_class_objects.values())\n ssz_dep_constants = '\\n'.join(map(lambda x: '%s = %s' % (x, builder.hardcoded_ssz_dep_constants()[x]), builder.hardcoded_ssz_dep_constants()))\n ssz_dep_constants_verification = '\\n'.join(map(lambda x: 'assert %s == %s' % (x, spec_object.ssz_dep_constants[x]), builder.hardcoded_ssz_dep_constants()))\n custom_type_dep_constants = '\\n'.join(map(lambda x: '%s = %s' % (x, builder.hardcoded_custom_type_dep_constants()[x]), builder.hardcoded_custom_type_dep_constants()))\n spec = (\n builder.imports(preset_name)\n + builder.preparations()\n + '\\n\\n' + f\"fork = \\'{builder.fork}\\'\\n\"\n # The constants that some SSZ containers require. Need to be defined before `new_type_definitions`\n + ('\\n\\n' + custom_type_dep_constants + '\\n' if custom_type_dep_constants != '' else '')\n + '\\n\\n' + new_type_definitions\n + '\\n' + CONSTANT_DEP_SUNDRY_CONSTANTS_FUNCTIONS\n # The constants that some SSZ containers require. 
Need to be defined before `constants_spec`\n + ('\\n\\n' + ssz_dep_constants if ssz_dep_constants != '' else '')\n + '\\n\\n' + constant_vars_spec\n + '\\n\\n' + preset_vars_spec\n + '\\n\\n\\n' + config_spec\n + '\\n\\n' + ordered_class_objects_spec\n + ('\\n\\n\\n' + protocols_spec if protocols_spec != '' else '')\n + '\\n\\n\\n' + functions_spec\n + '\\n\\n' + builder.sundry_functions()\n # Since some constants are hardcoded in setup.py, the following assertions verify that the hardcoded constants are\n # as same as the spec definition.\n + ('\\n\\n\\n' + ssz_dep_constants_verification if ssz_dep_constants_verification != '' else '')\n + '\\n'\n )\n return spec\n\n\ndef combine_protocols(old_protocols: Dict[str, ProtocolDefinition],\n new_protocols: Dict[str, ProtocolDefinition]) -> Dict[str, ProtocolDefinition]:\n for key, value in new_protocols.items():\n if key not in old_protocols:\n old_protocols[key] = value\n else:\n functions = combine_dicts(old_protocols[key].functions, value.functions)\n old_protocols[key] = ProtocolDefinition(functions=functions)\n return old_protocols\n\n\nT = TypeVar('T')\n\n\ndef combine_dicts(old_dict: Dict[str, T], new_dict: Dict[str, T]) -> Dict[str, T]:\n return {**old_dict, **new_dict}\n\n\nignored_dependencies = [\n 'bit', 'boolean', 'Vector', 'List', 'Container', 'BLSPubkey', 'BLSSignature',\n 'Bytes1', 'Bytes4', 'Bytes8', 'Bytes20', 'Bytes32', 'Bytes48', 'Bytes96', 'Bitlist', 'Bitvector',\n 'uint8', 'uint16', 'uint32', 'uint64', 'uint128', 'uint256',\n 'bytes', 'byte', 'ByteList', 'ByteVector',\n 'Dict', 'dict', 'field', 'ceillog2', 'floorlog2', 'Set',\n]\n\n\ndef dependency_order_class_objects(objects: Dict[str, str], custom_types: Dict[str, str]) -> None:\n \"\"\"\n Determines which SSZ Object is dependent on which other and orders them appropriately\n \"\"\"\n items = list(objects.items())\n for key, value in items:\n dependencies = []\n for line in value.split('\\n'):\n if not re.match(r'\\s+\\w+: .+', line):\n continue # skip whitespace etc.\n line = line[line.index(':') + 1:] # strip of field name\n if '#' in line:\n line = line[:line.index('#')] # strip of comment\n dependencies.extend(re.findall(r'(\\w+)', line)) # catch all legible words, potential dependencies\n dependencies = filter(lambda x: '_' not in x and x.upper() != x, dependencies) # filter out constants\n dependencies = filter(lambda x: x not in ignored_dependencies, dependencies)\n dependencies = filter(lambda x: x not in custom_types, dependencies)\n for dep in dependencies:\n key_list = list(objects.keys())\n for item in [dep, key] + key_list[key_list.index(dep)+1:]:\n objects[item] = objects.pop(item)\n\n\ndef combine_ssz_objects(old_objects: Dict[str, str], new_objects: Dict[str, str], custom_types) -> Dict[str, str]:\n \"\"\"\n Takes in old spec and new spec ssz objects, combines them,\n and returns the newer versions of the objects in dependency order.\n \"\"\"\n for key, value in new_objects.items():\n old_objects[key] = value\n return old_objects\n\n\ndef combine_spec_objects(spec0: SpecObject, spec1: SpecObject) -> SpecObject:\n \"\"\"\n Takes in two spec variants (as tuples of their objects) and combines them using the appropriate combiner function.\n \"\"\"\n protocols = combine_protocols(spec0.protocols, spec1.protocols)\n functions = combine_dicts(spec0.functions, spec1.functions)\n custom_types = combine_dicts(spec0.custom_types, spec1.custom_types)\n constant_vars = combine_dicts(spec0.constant_vars, spec1.constant_vars)\n preset_vars = 
combine_dicts(spec0.preset_vars, spec1.preset_vars)\n config_vars = combine_dicts(spec0.config_vars, spec1.config_vars)\n ssz_dep_constants = combine_dicts(spec0.ssz_dep_constants, spec1.ssz_dep_constants)\n ssz_objects = combine_ssz_objects(spec0.ssz_objects, spec1.ssz_objects, custom_types)\n dataclasses = combine_dicts(spec0.dataclasses, spec1.dataclasses)\n return SpecObject(\n functions=functions,\n protocols=protocols,\n custom_types=custom_types,\n constant_vars=constant_vars,\n preset_vars=preset_vars,\n config_vars=config_vars,\n ssz_dep_constants=ssz_dep_constants,\n ssz_objects=ssz_objects,\n dataclasses=dataclasses,\n )\n\n\ndef parse_config_vars(conf: Dict[str, str]) -> Dict[str, str]:\n \"\"\"\n Parses a dict of basic str/int/list types into a dict for insertion into the spec code.\n \"\"\"\n out: Dict[str, str] = dict()\n for k, v in conf.items():\n if isinstance(v, str) and (v.startswith(\"0x\") or k == 'PRESET_BASE'):\n # Represent byte data with string, to avoid misinterpretation as big-endian int.\n # Everything is either byte data or an integer, with PRESET_BASE as one exception.\n out[k] = f\"'{v}'\"\n else:\n out[k] = str(int(v))\n return out\n\n\ndef load_preset(preset_files: Sequence[Path]) -> Dict[str, str]:\n \"\"\"\n Loads the a directory of preset files, merges the result into one preset.\n \"\"\"\n preset = {}\n for fork_file in preset_files:\n yaml = YAML(typ='base')\n fork_preset: dict = yaml.load(fork_file)\n if fork_preset is None: # for empty YAML files\n continue\n if not set(fork_preset.keys()).isdisjoint(preset.keys()):\n duplicates = set(fork_preset.keys()).intersection(set(preset.keys()))\n raise Exception(f\"duplicate config var(s) in preset files: {', '.join(duplicates)}\")\n preset.update(fork_preset)\n assert preset != {}\n return parse_config_vars(preset)\n\n\ndef load_config(config_path: Path) -> Dict[str, str]:\n \"\"\"\n Loads the given configuration file.\n \"\"\"\n yaml = YAML(typ='base')\n config_data = yaml.load(config_path)\n return parse_config_vars(config_data)\n\n\ndef _build_spec(preset_name: str, fork: str,\n source_files: Sequence[Path], preset_files: Sequence[Path], config_file: Path) -> str:\n preset = load_preset(preset_files)\n config = load_config(config_file)\n all_specs = [get_spec(spec, preset, config) for spec in source_files]\n\n spec_object = all_specs[0]\n for value in all_specs[1:]:\n spec_object = combine_spec_objects(spec_object, value)\n\n class_objects = {**spec_object.ssz_objects, **spec_object.dataclasses}\n dependency_order_class_objects(class_objects, spec_object.custom_types)\n\n return objects_to_spec(preset_name, spec_object, spec_builders[fork], class_objects)\n\n\nclass BuildTarget(NamedTuple):\n name: str\n preset_paths: List[Path]\n config_path: Path\n\n\nclass PySpecCommand(Command):\n \"\"\"Convert spec markdown files to a spec python file\"\"\"\n\n description = \"Convert spec markdown files to a spec python file\"\n\n spec_fork: str\n md_doc_paths: str\n parsed_md_doc_paths: List[str]\n build_targets: str\n parsed_build_targets: List[BuildTarget]\n out_dir: str\n\n # The format is (long option, short option, description).\n user_options = [\n ('spec-fork=', None, \"Spec fork to tag build with. 
Used to select md-docs defaults.\"),\n ('md-doc-paths=', None, \"List of paths of markdown files to build spec with\"),\n ('build-targets=', None, \"Names, directory paths of compile-time presets, and default config paths.\"),\n ('out-dir=', None, \"Output directory to write spec package to\")\n ]\n\n def initialize_options(self):\n \"\"\"Set default values for options.\"\"\"\n # Each user option must be listed here with their default value.\n self.spec_fork = PHASE0\n self.md_doc_paths = ''\n self.out_dir = 'pyspec_output'\n self.build_targets = \"\"\"\n minimal:presets/minimal:configs/minimal.yaml\n mainnet:presets/mainnet:configs/mainnet.yaml\n \"\"\"\n\n def finalize_options(self):\n \"\"\"Post-process options.\"\"\"\n if len(self.md_doc_paths) == 0:\n print(\"no paths were specified, using default markdown file paths for pyspec\"\n \" build (spec fork: %s)\" % self.spec_fork)\n if self.spec_fork in (PHASE0, ALTAIR, MERGE):\n self.md_doc_paths = \"\"\"\n specs/phase0/beacon-chain.md\n specs/phase0/fork-choice.md\n specs/phase0/validator.md\n specs/phase0/weak-subjectivity.md\n \"\"\"\n if self.spec_fork in (ALTAIR, MERGE):\n self.md_doc_paths += \"\"\"\n specs/altair/beacon-chain.md\n specs/altair/bls.md\n specs/altair/fork.md\n specs/altair/validator.md\n specs/altair/p2p-interface.md\n specs/altair/sync-protocol.md\n \"\"\"\n if self.spec_fork == MERGE:\n self.md_doc_paths += \"\"\"\n specs/merge/beacon-chain.md\n specs/merge/fork.md\n specs/merge/fork-choice.md\n specs/merge/validator.md\n \"\"\"\n if len(self.md_doc_paths) == 0:\n raise Exception('no markdown files specified, and spec fork \"%s\" is unknown', self.spec_fork)\n\n self.parsed_md_doc_paths = self.md_doc_paths.split()\n\n for filename in self.parsed_md_doc_paths:\n if not os.path.exists(filename):\n raise Exception('Pyspec markdown input file \"%s\" does not exist.' % filename)\n\n self.parsed_build_targets = []\n for target in self.build_targets.split():\n target = target.strip()\n data = target.split(':')\n if len(data) != 3:\n raise Exception('invalid target, expected \"name:preset_dir:config_file\" format, but got: %s' % target)\n name, preset_dir_path, config_path = data\n if any((c not in string.digits + string.ascii_letters) for c in name):\n raise Exception('invalid target name: \"%s\"' % name)\n if not os.path.exists(preset_dir_path):\n raise Exception('Preset dir \"%s\" does not exist' % preset_dir_path)\n _, _, preset_file_names = next(os.walk(preset_dir_path))\n preset_paths = [(Path(preset_dir_path) / name) for name in preset_file_names]\n\n if not os.path.exists(config_path):\n raise Exception('Config file \"%s\" does not exist' % config_path)\n self.parsed_build_targets.append(BuildTarget(name, preset_paths, Path(config_path)))\n\n def run(self):\n if not self.dry_run:\n dir_util.mkpath(self.out_dir)\n\n for (name, preset_paths, config_path) in self.parsed_build_targets:\n spec_str = spec_builders[self.spec_fork].build_spec(\n name, self.parsed_md_doc_paths, preset_paths, config_path)\n if self.dry_run:\n self.announce('dry run successfully prepared contents for spec.'\n f' out dir: \"{self.out_dir}\", spec fork: \"{self.spec_fork}\", build target: \"{name}\"')\n self.debug_print(spec_str)\n else:\n with open(os.path.join(self.out_dir, name+'.py'), 'w') as out:\n out.write(spec_str)\n\n if not self.dry_run:\n with open(os.path.join(self.out_dir, '__init__.py'), 'w') as out:\n # `mainnet` is the default spec.\n out.write(\"from . 
import mainnet as spec # noqa:F401\\n\")\n\n\nclass BuildPyCommand(build_py):\n \"\"\"Customize the build command to run the spec-builder on setup.py build\"\"\"\n\n def initialize_options(self):\n super(BuildPyCommand, self).initialize_options()\n\n def run_pyspec_cmd(self, spec_fork: str, **opts):\n cmd_obj: PySpecCommand = self.distribution.reinitialize_command(\"pyspec\")\n cmd_obj.spec_fork = spec_fork\n cmd_obj.out_dir = os.path.join(self.build_lib, 'eth2spec', spec_fork)\n for k, v in opts.items():\n setattr(cmd_obj, k, v)\n self.run_command('pyspec')\n\n def run(self):\n for spec_fork in spec_builders:\n self.run_pyspec_cmd(spec_fork=spec_fork)\n\n super(BuildPyCommand, self).run()\n\n\nclass PyspecDevCommand(Command):\n \"\"\"Build the markdown files in-place to their source location for testing.\"\"\"\n description = \"Build the markdown files in-place to their source location for testing.\"\n user_options = []\n\n def initialize_options(self):\n pass\n\n def finalize_options(self):\n pass\n\n def run_pyspec_cmd(self, spec_fork: str, **opts):\n cmd_obj: PySpecCommand = self.distribution.reinitialize_command(\"pyspec\")\n cmd_obj.spec_fork = spec_fork\n eth2spec_dir = convert_path(self.distribution.package_dir['eth2spec'])\n cmd_obj.out_dir = os.path.join(eth2spec_dir, spec_fork)\n for k, v in opts.items():\n setattr(cmd_obj, k, v)\n self.run_command('pyspec')\n\n def run(self):\n print(\"running build_py command\")\n for spec_fork in spec_builders:\n self.run_pyspec_cmd(spec_fork=spec_fork)\n\ncommands = {\n 'pyspec': PySpecCommand,\n 'build_py': BuildPyCommand,\n 'pyspecdev': PyspecDevCommand,\n}\n\nwith open(\"README.md\", \"rt\", encoding=\"utf8\") as f:\n readme = f.read()\n\n# How to use \"VERSION.txt\" file:\n# - dev branch contains \"X.Y.Z.dev\", where \"X.Y.Z\" is the target version to release dev into.\n# -> Changed as part of 'master' backport to 'dev'\n# - master branch contains \"X.Y.Z\", where \"X.Y.Z\" is the current version.\n# -> Changed as part of 'dev' release (or other branch) into 'master'\n# -> In case of a commit on master without git tag, target the next version\n# with \".postN\" (release candidate, numbered) suffixed.\n# See https://www.python.org/dev/peps/pep-0440/#public-version-identifiers\nwith open(os.path.join('tests', 'core', 'pyspec', 'eth2spec', 'VERSION.txt')) as f:\n spec_version = f.read().strip()\n\nsetup(\n name='eth2spec',\n version=spec_version,\n description=\"Eth2 spec, provided as Python package for tooling and testing\",\n long_description=readme,\n long_description_content_type=\"text/markdown\",\n author=\"ethereum\",\n url=\"https://github.com/ethereum/eth2.0-specs\",\n include_package_data=False,\n package_data={'configs': ['*.yaml'],\n 'presets': ['*.yaml'],\n 'specs': ['**/*.md'],\n 'eth2spec': ['VERSION.txt']},\n package_dir={\n \"eth2spec\": \"tests/core/pyspec/eth2spec\",\n \"configs\": \"configs\",\n \"presets\": \"presets\",\n \"specs\": \"specs\",\n },\n packages=find_packages(where='tests/core/pyspec') + ['configs', 'specs'],\n py_modules=[\"eth2spec\"],\n cmdclass=commands,\n python_requires=\">=3.8, <4\",\n extras_require={\n \"test\": [\"pytest>=4.4\", \"pytest-cov\", \"pytest-xdist\"],\n \"lint\": [\"flake8==3.7.7\", \"mypy==0.812\"],\n \"generator\": [\"python-snappy==0.5.4\"],\n },\n install_requires=[\n \"eth-utils>=1.3.0,<2\",\n \"eth-typing>=2.1.0,<3.0.0\",\n \"pycryptodome==3.9.4\",\n \"py_ecc==5.2.0\",\n \"milagro_bls_binding==1.6.3\",\n \"dataclasses==0.6\",\n \"remerkleable==0.1.24\",\n RUAMEL_YAML_VERSION,\n 
\"lru-dict==1.1.6\",\n MARKO_VERSION,\n ]\n)\n",
"path": "setup.py"
}
] | [
{
"content": "from setuptools import setup, find_packages, Command\nfrom setuptools.command.build_py import build_py\nfrom distutils import dir_util\nfrom distutils.util import convert_path\nfrom pathlib import Path\nimport os\nimport re\nimport string\nimport textwrap\nfrom typing import Dict, NamedTuple, List, Sequence, Optional, TypeVar\nfrom abc import ABC, abstractmethod\nimport ast\nimport subprocess\nimport sys\n\n# NOTE: have to programmatically include third-party dependencies in `setup.py`.\ndef installPackage(package: str):\n subprocess.check_call([sys.executable, '-m', 'pip', 'install', package])\n\nRUAMEL_YAML_VERSION = \"ruamel.yaml==0.16.5\"\ntry:\n import ruamel.yaml\nexcept ImportError:\n installPackage(RUAMEL_YAML_VERSION)\n\nfrom ruamel.yaml import YAML\n\nMARKO_VERSION = \"marko==1.0.2\"\ntry:\n import marko\nexcept ImportError:\n installPackage(MARKO_VERSION)\n\nfrom marko.block import Heading, FencedCode, LinkRefDef, BlankLine\nfrom marko.inline import CodeSpan\nfrom marko.ext.gfm import gfm\nfrom marko.ext.gfm.elements import Table\n\n\n# Definitions in context.py\nPHASE0 = 'phase0'\nALTAIR = 'altair'\nMERGE = 'merge'\n\n# The helper functions that are used when defining constants\nCONSTANT_DEP_SUNDRY_CONSTANTS_FUNCTIONS = '''\ndef ceillog2(x: int) -> uint64:\n if x < 1:\n raise ValueError(f\"ceillog2 accepts only positive values, x={x}\")\n return uint64((x - 1).bit_length())\n\n\ndef floorlog2(x: int) -> uint64:\n if x < 1:\n raise ValueError(f\"floorlog2 accepts only positive values, x={x}\")\n return uint64(x.bit_length() - 1)\n'''\n\n\nOPTIMIZED_BLS_AGGREGATE_PUBKEYS = '''\ndef eth_aggregate_pubkeys(pubkeys: Sequence[BLSPubkey]) -> BLSPubkey:\n return bls.AggregatePKs(pubkeys)\n'''\n\n\nclass ProtocolDefinition(NamedTuple):\n # just function definitions currently. May expand with configuration vars in future.\n functions: Dict[str, str]\n\n\nclass VariableDefinition(NamedTuple):\n type_name: Optional[str]\n value: str\n comment: Optional[str] # e.g. \"noqa: E501\"\n\n\nclass SpecObject(NamedTuple):\n functions: Dict[str, str]\n protocols: Dict[str, ProtocolDefinition]\n custom_types: Dict[str, str]\n constant_vars: Dict[str, VariableDefinition]\n preset_vars: Dict[str, VariableDefinition]\n config_vars: Dict[str, VariableDefinition]\n ssz_dep_constants: Dict[str, str] # the constants that depend on ssz_objects\n ssz_objects: Dict[str, str]\n dataclasses: Dict[str, str]\n\n\ndef _get_name_from_heading(heading: Heading) -> Optional[str]:\n last_child = heading.children[-1]\n if isinstance(last_child, CodeSpan):\n return last_child.children\n return None\n\n\ndef _get_source_from_code_block(block: FencedCode) -> str:\n return block.children[0].children.strip()\n\n\ndef _get_function_name_from_source(source: str) -> str:\n fn = ast.parse(source).body[0]\n return fn.name\n\n\ndef _get_self_type_from_source(source: str) -> Optional[str]:\n fn = ast.parse(source).body[0]\n args = fn.args.args\n if len(args) == 0:\n return None\n if args[0].arg != 'self':\n return None\n if args[0].annotation is None:\n return None\n return args[0].annotation.id\n\n\ndef _get_class_info_from_source(source: str) -> (str, Optional[str]):\n class_def = ast.parse(source).body[0]\n base = class_def.bases[0]\n if isinstance(base, ast.Name):\n parent_class = base.id\n else:\n # NOTE: SSZ definition derives from earlier phase...\n # e.g. 
`phase0.SignedBeaconBlock`\n # TODO: check for consistency with other phases\n parent_class = None\n return class_def.name, parent_class\n\n\ndef _is_constant_id(name: str) -> bool:\n if name[0] not in string.ascii_uppercase + '_':\n return False\n return all(map(lambda c: c in string.ascii_uppercase + '_' + string.digits, name[1:]))\n\n\nETH2_SPEC_COMMENT_PREFIX = \"eth2spec:\"\n\n\ndef _get_eth2_spec_comment(child: LinkRefDef) -> Optional[str]:\n _, _, title = child._parse_info\n if not (title[0] == \"(\" and title[len(title)-1] == \")\"):\n return None\n title = title[1:len(title)-1]\n if not title.startswith(ETH2_SPEC_COMMENT_PREFIX):\n return None\n return title[len(ETH2_SPEC_COMMENT_PREFIX):].strip()\n\n\ndef _parse_value(name: str, typed_value: str) -> VariableDefinition:\n comment = None\n if name == \"BLS12_381_Q\":\n comment = \"noqa: E501\"\n\n typed_value = typed_value.strip()\n if '(' not in typed_value:\n return VariableDefinition(type_name=None, value=typed_value, comment=comment)\n i = typed_value.index('(')\n type_name = typed_value[:i]\n\n return VariableDefinition(type_name=type_name, value=typed_value[i+1:-1], comment=comment)\n\n\ndef get_spec(file_name: Path, preset: Dict[str, str], config: Dict[str, str]) -> SpecObject:\n functions: Dict[str, str] = {}\n protocols: Dict[str, ProtocolDefinition] = {}\n constant_vars: Dict[str, VariableDefinition] = {}\n preset_vars: Dict[str, VariableDefinition] = {}\n config_vars: Dict[str, VariableDefinition] = {}\n ssz_dep_constants: Dict[str, str] = {}\n ssz_objects: Dict[str, str] = {}\n dataclasses: Dict[str, str] = {}\n custom_types: Dict[str, str] = {}\n\n with open(file_name) as source_file:\n document = gfm.parse(source_file.read())\n\n current_name = None\n should_skip = False\n for child in document.children:\n if isinstance(child, BlankLine):\n continue\n if should_skip:\n should_skip = False\n continue\n if isinstance(child, Heading):\n current_name = _get_name_from_heading(child)\n elif isinstance(child, FencedCode):\n if child.lang != \"python\":\n continue\n source = _get_source_from_code_block(child)\n if source.startswith(\"def\"):\n current_name = _get_function_name_from_source(source)\n self_type_name = _get_self_type_from_source(source)\n function_def = \"\\n\".join(line.rstrip() for line in source.splitlines())\n if self_type_name is None:\n functions[current_name] = function_def\n else:\n if self_type_name not in protocols:\n protocols[self_type_name] = ProtocolDefinition(functions={})\n protocols[self_type_name].functions[current_name] = function_def\n elif source.startswith(\"@dataclass\"):\n dataclasses[current_name] = \"\\n\".join(line.rstrip() for line in source.splitlines())\n elif source.startswith(\"class\"):\n class_name, parent_class = _get_class_info_from_source(source)\n # check consistency with spec\n assert class_name == current_name\n if parent_class:\n assert parent_class == \"Container\"\n # NOTE: trim whitespace from spec\n ssz_objects[current_name] = \"\\n\".join(line.rstrip() for line in source.splitlines())\n else:\n raise Exception(\"unrecognized python code element: \" + source)\n elif isinstance(child, Table):\n for row in child.children:\n cells = row.children\n if len(cells) >= 2:\n name_cell = cells[0]\n name = name_cell.children[0].children\n\n value_cell = cells[1]\n value = value_cell.children[0].children\n if isinstance(value, list):\n # marko parses `**X**` as a list containing a X\n value = value[0].children\n\n if not _is_constant_id(name):\n # Check for short type 
declarations\n if value.startswith((\"uint\", \"Bytes\", \"ByteList\", \"Union\")):\n custom_types[name] = value\n continue\n\n if value.startswith(\"get_generalized_index\"):\n ssz_dep_constants[name] = value\n continue\n\n value_def = _parse_value(name, value)\n if name in preset:\n preset_vars[name] = VariableDefinition(value_def.type_name, preset[name], value_def.comment)\n elif name in config:\n config_vars[name] = VariableDefinition(value_def.type_name, config[name], value_def.comment)\n else:\n constant_vars[name] = value_def\n\n elif isinstance(child, LinkRefDef):\n comment = _get_eth2_spec_comment(child)\n if comment == \"skip\":\n should_skip = True\n\n return SpecObject(\n functions=functions,\n protocols=protocols,\n custom_types=custom_types,\n constant_vars=constant_vars,\n preset_vars=preset_vars,\n config_vars=config_vars,\n ssz_dep_constants=ssz_dep_constants,\n ssz_objects=ssz_objects,\n dataclasses=dataclasses,\n )\n\n\nclass SpecBuilder(ABC):\n @property\n @abstractmethod\n def fork(self) -> str:\n raise NotImplementedError()\n\n @classmethod\n @abstractmethod\n def imports(cls, preset_name: str) -> str:\n \"\"\"\n Import objects from other libraries.\n \"\"\"\n raise NotImplementedError()\n\n @classmethod\n @abstractmethod\n def preparations(cls) -> str:\n \"\"\"\n Define special types/constants for building pyspec or call functions.\n \"\"\"\n raise NotImplementedError()\n\n @classmethod\n @abstractmethod\n def sundry_functions(cls) -> str:\n \"\"\"\n The functions that are (1) defined abstractly in specs or (2) adjusted for getting better performance.\n \"\"\"\n raise NotImplementedError()\n\n @classmethod\n @abstractmethod\n def hardcoded_ssz_dep_constants(cls) -> Dict[str, str]:\n \"\"\"\n The constants that are required for SSZ objects.\n \"\"\"\n raise NotImplementedError()\n\n @classmethod\n @abstractmethod\n def hardcoded_custom_type_dep_constants(cls) -> Dict[str, str]: # TODO\n \"\"\"\n The constants that are required for custom types.\n \"\"\"\n raise NotImplementedError()\n\n @classmethod\n @abstractmethod\n def implement_optimizations(cls, functions: Dict[str, str]) -> Dict[str, str]:\n raise NotImplementedError()\n\n @classmethod\n @abstractmethod\n def build_spec(cls, preset_name: str,\n source_files: List[Path], preset_files: Sequence[Path], config_file: Path) -> str:\n raise NotImplementedError()\n\n\n#\n# Phase0SpecBuilder\n#\nclass Phase0SpecBuilder(SpecBuilder):\n fork: str = PHASE0\n\n @classmethod\n def imports(cls, preset_name: str) -> str:\n return '''from lru import LRU\nfrom dataclasses import (\n dataclass,\n field,\n)\nfrom typing import (\n Any, Callable, Dict, Set, Sequence, Tuple, Optional, TypeVar, NamedTuple\n)\n\nfrom eth2spec.utils.ssz.ssz_impl import hash_tree_root, copy, uint_to_bytes\nfrom eth2spec.utils.ssz.ssz_typing import (\n View, boolean, Container, List, Vector, uint8, uint32, uint64,\n Bytes1, Bytes4, Bytes32, Bytes48, Bytes96, Bitlist)\nfrom eth2spec.utils.ssz.ssz_typing import Bitvector # noqa: F401\nfrom eth2spec.utils import bls\nfrom eth2spec.utils.hash_function import hash\n'''\n\n @classmethod\n def preparations(cls) -> str:\n return '''\nSSZObject = TypeVar('SSZObject', bound=View)\n'''\n\n @classmethod\n def sundry_functions(cls) -> str:\n return '''\ndef get_eth1_data(block: Eth1Block) -> Eth1Data:\n \"\"\"\n A stub function return mocking Eth1Data.\n \"\"\"\n return Eth1Data(\n deposit_root=block.deposit_root,\n deposit_count=block.deposit_count,\n block_hash=hash_tree_root(block))\n\n\ndef cache_this(key_fn, 
value_fn, lru_size): # type: ignore\n cache_dict = LRU(size=lru_size)\n\n def wrapper(*args, **kw): # type: ignore\n key = key_fn(*args, **kw)\n nonlocal cache_dict\n if key not in cache_dict:\n cache_dict[key] = value_fn(*args, **kw)\n return cache_dict[key]\n return wrapper\n\n\n_compute_shuffled_index = compute_shuffled_index\ncompute_shuffled_index = cache_this(\n lambda index, index_count, seed: (index, index_count, seed),\n _compute_shuffled_index, lru_size=SLOTS_PER_EPOCH * 3)\n\n_get_total_active_balance = get_total_active_balance\nget_total_active_balance = cache_this(\n lambda state: (state.validators.hash_tree_root(), compute_epoch_at_slot(state.slot)),\n _get_total_active_balance, lru_size=10)\n\n_get_base_reward = get_base_reward\nget_base_reward = cache_this(\n lambda state, index: (state.validators.hash_tree_root(), state.slot, index),\n _get_base_reward, lru_size=2048)\n\n_get_committee_count_per_slot = get_committee_count_per_slot\nget_committee_count_per_slot = cache_this(\n lambda state, epoch: (state.validators.hash_tree_root(), epoch),\n _get_committee_count_per_slot, lru_size=SLOTS_PER_EPOCH * 3)\n\n_get_active_validator_indices = get_active_validator_indices\nget_active_validator_indices = cache_this(\n lambda state, epoch: (state.validators.hash_tree_root(), epoch),\n _get_active_validator_indices, lru_size=3)\n\n_get_beacon_committee = get_beacon_committee\nget_beacon_committee = cache_this(\n lambda state, slot, index: (state.validators.hash_tree_root(), state.randao_mixes.hash_tree_root(), slot, index),\n _get_beacon_committee, lru_size=SLOTS_PER_EPOCH * MAX_COMMITTEES_PER_SLOT * 3)\n\n_get_matching_target_attestations = get_matching_target_attestations\nget_matching_target_attestations = cache_this(\n lambda state, epoch: (state.hash_tree_root(), epoch),\n _get_matching_target_attestations, lru_size=10)\n\n_get_matching_head_attestations = get_matching_head_attestations\nget_matching_head_attestations = cache_this(\n lambda state, epoch: (state.hash_tree_root(), epoch),\n _get_matching_head_attestations, lru_size=10)\n\n_get_attesting_indices = get_attesting_indices\nget_attesting_indices = cache_this(\n lambda state, data, bits: (\n state.randao_mixes.hash_tree_root(),\n state.validators.hash_tree_root(), data.hash_tree_root(), bits.hash_tree_root()\n ),\n _get_attesting_indices, lru_size=SLOTS_PER_EPOCH * MAX_COMMITTEES_PER_SLOT * 3)'''\n\n @classmethod\n def hardcoded_ssz_dep_constants(cls) -> Dict[str, str]:\n return {}\n\n @classmethod\n def hardcoded_custom_type_dep_constants(cls) -> Dict[str, str]:\n return {}\n\n @classmethod\n def implement_optimizations(cls, functions: Dict[str, str]) -> Dict[str, str]:\n return functions\n\n @classmethod\n def build_spec(cls, preset_name: str,\n source_files: Sequence[Path], preset_files: Sequence[Path], config_file: Path) -> str:\n return _build_spec(preset_name, cls.fork, source_files, preset_files, config_file)\n\n\n#\n# AltairSpecBuilder\n#\nclass AltairSpecBuilder(Phase0SpecBuilder):\n fork: str = ALTAIR\n\n @classmethod\n def imports(cls, preset_name: str) -> str:\n return super().imports(preset_name) + '\\n' + f'''\nfrom typing import NewType, Union as PyUnion\n\nfrom eth2spec.phase0 import {preset_name} as phase0\nfrom eth2spec.utils.ssz.ssz_typing import Path\n'''\n\n @classmethod\n def preparations(cls):\n return super().preparations() + '\\n' + '''\nSSZVariableName = str\nGeneralizedIndex = NewType('GeneralizedIndex', int)\n'''\n\n @classmethod\n def sundry_functions(cls) -> str:\n return 
super().sundry_functions() + '\\n\\n' + '''\ndef get_generalized_index(ssz_class: Any, *path: Sequence[PyUnion[int, SSZVariableName]]) -> GeneralizedIndex:\n ssz_path = Path(ssz_class)\n for item in path:\n ssz_path = ssz_path / item\n return GeneralizedIndex(ssz_path.gindex())'''\n\n\n @classmethod\n def hardcoded_ssz_dep_constants(cls) -> Dict[str, str]:\n constants = {\n 'FINALIZED_ROOT_INDEX': 'GeneralizedIndex(105)',\n 'NEXT_SYNC_COMMITTEE_INDEX': 'GeneralizedIndex(55)',\n }\n return {**super().hardcoded_ssz_dep_constants(), **constants}\n\n @classmethod\n def implement_optimizations(cls, functions: Dict[str, str]) -> Dict[str, str]:\n if \"eth_aggregate_pubkeys\" in functions:\n functions[\"eth_aggregate_pubkeys\"] = OPTIMIZED_BLS_AGGREGATE_PUBKEYS.strip()\n return super().implement_optimizations(functions)\n\n#\n# MergeSpecBuilder\n#\nclass MergeSpecBuilder(AltairSpecBuilder):\n fork: str = MERGE\n\n @classmethod\n def imports(cls, preset_name: str):\n return super().imports(preset_name) + f'''\nfrom typing import Protocol\nfrom eth2spec.altair import {preset_name} as altair\nfrom eth2spec.utils.ssz.ssz_typing import Bytes8, Bytes20, ByteList, ByteVector, uint256\n'''\n\n @classmethod\n def preparations(cls):\n return super().preparations()\n\n @classmethod\n def sundry_functions(cls) -> str:\n return super().sundry_functions() + '\\n\\n' + \"\"\"\nExecutionState = Any\n\n\ndef get_pow_block(hash: Bytes32) -> Optional[PowBlock]:\n return PowBlock(block_hash=hash, parent_hash=Bytes32(), total_difficulty=uint256(0))\n\n\ndef get_execution_state(execution_state_root: Bytes32) -> ExecutionState:\n pass\n\n\ndef get_pow_chain_head() -> PowBlock:\n pass\n\n\nclass NoopExecutionEngine(ExecutionEngine):\n\n def execute_payload(self: ExecutionEngine, execution_payload: ExecutionPayload) -> bool:\n return True\n\n def notify_forkchoice_updated(self: ExecutionEngine,\n head_block_hash: Hash32,\n finalized_block_hash: Hash32,\n payload_attributes: Optional[PayloadAttributes]) -> Optional[PayloadId]:\n pass\n\n def get_payload(self: ExecutionEngine, payload_id: PayloadId) -> ExecutionPayload:\n raise NotImplementedError(\"no default block production\")\n\n\nEXECUTION_ENGINE = NoopExecutionEngine()\"\"\"\n\n\n @classmethod\n def hardcoded_custom_type_dep_constants(cls) -> str:\n constants = {\n 'MAX_BYTES_PER_TRANSACTION': 'uint64(2**30)',\n }\n return {**super().hardcoded_custom_type_dep_constants(), **constants}\n\n\nspec_builders = {\n builder.fork: builder\n for builder in (Phase0SpecBuilder, AltairSpecBuilder, MergeSpecBuilder)\n}\n\n\ndef is_spec_defined_type(value: str) -> bool:\n return value.startswith('ByteList') or value.startswith('Union')\n\n\ndef objects_to_spec(preset_name: str,\n spec_object: SpecObject,\n builder: SpecBuilder,\n ordered_class_objects: Dict[str, str]) -> str:\n \"\"\"\n Given all the objects that constitute a spec, combine them into a single pyfile.\n \"\"\"\n new_type_definitions = (\n '\\n\\n'.join(\n [\n f\"class {key}({value}):\\n pass\\n\"\n for key, value in spec_object.custom_types.items()\n if not is_spec_defined_type(value)\n ]\n )\n + ('\\n\\n' if len([key for key, value in spec_object.custom_types.items() if is_spec_defined_type(value)]) > 0 else '')\n + '\\n\\n'.join(\n [\n f\"{key} = {value}\\n\"\n for key, value in spec_object.custom_types.items()\n if is_spec_defined_type(value)\n ]\n )\n )\n\n def format_protocol(protocol_name: str, protocol_def: ProtocolDefinition) -> str:\n protocol = f\"class {protocol_name}(Protocol):\"\n for fn_source in 
protocol_def.functions.values():\n fn_source = fn_source.replace(\"self: \"+protocol_name, \"self\")\n protocol += \"\\n\\n\" + textwrap.indent(fn_source, \" \")\n return protocol\n\n protocols_spec = '\\n\\n\\n'.join(format_protocol(k, v) for k, v in spec_object.protocols.items())\n for k in list(spec_object.functions):\n if \"ceillog2\" in k or \"floorlog2\" in k:\n del spec_object.functions[k]\n functions = builder.implement_optimizations(spec_object.functions)\n functions_spec = '\\n\\n\\n'.join(functions.values())\n\n # Access global dict of config vars for runtime configurables\n for name in spec_object.config_vars.keys():\n functions_spec = re.sub(r\"\\b%s\\b\" % name, 'config.' + name, functions_spec)\n\n def format_config_var(name: str, vardef: VariableDefinition) -> str:\n if vardef.type_name is None:\n out = f'{name}={vardef.value},'\n else:\n out = f'{name}={vardef.type_name}({vardef.value}),'\n if vardef.comment is not None:\n out += f' # {vardef.comment}'\n return out\n\n config_spec = 'class Configuration(NamedTuple):\\n'\n config_spec += ' PRESET_BASE: str\\n'\n config_spec += '\\n'.join(f' {k}: {v.type_name if v.type_name is not None else \"int\"}'\n for k, v in spec_object.config_vars.items())\n config_spec += '\\n\\n\\nconfig = Configuration(\\n'\n config_spec += f' PRESET_BASE=\"{preset_name}\",\\n'\n config_spec += '\\n'.join(' ' + format_config_var(k, v) for k, v in spec_object.config_vars.items())\n config_spec += '\\n)\\n'\n\n def format_constant(name: str, vardef: VariableDefinition) -> str:\n if vardef.type_name is None:\n out = f'{name} = {vardef.value}'\n else:\n out = f'{name} = {vardef.type_name}({vardef.value})'\n if vardef.comment is not None:\n out += f' # {vardef.comment}'\n return out\n\n constant_vars_spec = '# Constant vars\\n' + '\\n'.join(format_constant(k, v) for k, v in spec_object.constant_vars.items())\n preset_vars_spec = '# Preset vars\\n' + '\\n'.join(format_constant(k, v) for k, v in spec_object.preset_vars.items())\n ordered_class_objects_spec = '\\n\\n\\n'.join(ordered_class_objects.values())\n ssz_dep_constants = '\\n'.join(map(lambda x: '%s = %s' % (x, builder.hardcoded_ssz_dep_constants()[x]), builder.hardcoded_ssz_dep_constants()))\n ssz_dep_constants_verification = '\\n'.join(map(lambda x: 'assert %s == %s' % (x, spec_object.ssz_dep_constants[x]), builder.hardcoded_ssz_dep_constants()))\n custom_type_dep_constants = '\\n'.join(map(lambda x: '%s = %s' % (x, builder.hardcoded_custom_type_dep_constants()[x]), builder.hardcoded_custom_type_dep_constants()))\n spec = (\n builder.imports(preset_name)\n + builder.preparations()\n + '\\n\\n' + f\"fork = \\'{builder.fork}\\'\\n\"\n # The constants that some SSZ containers require. Need to be defined before `new_type_definitions`\n + ('\\n\\n' + custom_type_dep_constants + '\\n' if custom_type_dep_constants != '' else '')\n + '\\n\\n' + new_type_definitions\n + '\\n' + CONSTANT_DEP_SUNDRY_CONSTANTS_FUNCTIONS\n # The constants that some SSZ containers require. 
Need to be defined before `constants_spec`\n + ('\\n\\n' + ssz_dep_constants if ssz_dep_constants != '' else '')\n + '\\n\\n' + constant_vars_spec\n + '\\n\\n' + preset_vars_spec\n + '\\n\\n\\n' + config_spec\n + '\\n\\n' + ordered_class_objects_spec\n + ('\\n\\n\\n' + protocols_spec if protocols_spec != '' else '')\n + '\\n\\n\\n' + functions_spec\n + '\\n\\n' + builder.sundry_functions()\n # Since some constants are hardcoded in setup.py, the following assertions verify that the hardcoded constants are\n # as same as the spec definition.\n + ('\\n\\n\\n' + ssz_dep_constants_verification if ssz_dep_constants_verification != '' else '')\n + '\\n'\n )\n return spec\n\n\ndef combine_protocols(old_protocols: Dict[str, ProtocolDefinition],\n new_protocols: Dict[str, ProtocolDefinition]) -> Dict[str, ProtocolDefinition]:\n for key, value in new_protocols.items():\n if key not in old_protocols:\n old_protocols[key] = value\n else:\n functions = combine_dicts(old_protocols[key].functions, value.functions)\n old_protocols[key] = ProtocolDefinition(functions=functions)\n return old_protocols\n\n\nT = TypeVar('T')\n\n\ndef combine_dicts(old_dict: Dict[str, T], new_dict: Dict[str, T]) -> Dict[str, T]:\n return {**old_dict, **new_dict}\n\n\nignored_dependencies = [\n 'bit', 'boolean', 'Vector', 'List', 'Container', 'BLSPubkey', 'BLSSignature',\n 'Bytes1', 'Bytes4', 'Bytes8', 'Bytes20', 'Bytes32', 'Bytes48', 'Bytes96', 'Bitlist', 'Bitvector',\n 'uint8', 'uint16', 'uint32', 'uint64', 'uint128', 'uint256',\n 'bytes', 'byte', 'ByteList', 'ByteVector',\n 'Dict', 'dict', 'field', 'ceillog2', 'floorlog2', 'Set',\n 'Optional',\n]\n\n\ndef dependency_order_class_objects(objects: Dict[str, str], custom_types: Dict[str, str]) -> None:\n \"\"\"\n Determines which SSZ Object is dependent on which other and orders them appropriately\n \"\"\"\n items = list(objects.items())\n for key, value in items:\n dependencies = []\n for line in value.split('\\n'):\n if not re.match(r'\\s+\\w+: .+', line):\n continue # skip whitespace etc.\n line = line[line.index(':') + 1:] # strip of field name\n if '#' in line:\n line = line[:line.index('#')] # strip of comment\n dependencies.extend(re.findall(r'(\\w+)', line)) # catch all legible words, potential dependencies\n dependencies = filter(lambda x: '_' not in x and x.upper() != x, dependencies) # filter out constants\n dependencies = filter(lambda x: x not in ignored_dependencies, dependencies)\n dependencies = filter(lambda x: x not in custom_types, dependencies)\n for dep in dependencies:\n key_list = list(objects.keys())\n for item in [dep, key] + key_list[key_list.index(dep)+1:]:\n objects[item] = objects.pop(item)\n\n\ndef combine_ssz_objects(old_objects: Dict[str, str], new_objects: Dict[str, str], custom_types) -> Dict[str, str]:\n \"\"\"\n Takes in old spec and new spec ssz objects, combines them,\n and returns the newer versions of the objects in dependency order.\n \"\"\"\n for key, value in new_objects.items():\n old_objects[key] = value\n return old_objects\n\n\ndef combine_spec_objects(spec0: SpecObject, spec1: SpecObject) -> SpecObject:\n \"\"\"\n Takes in two spec variants (as tuples of their objects) and combines them using the appropriate combiner function.\n \"\"\"\n protocols = combine_protocols(spec0.protocols, spec1.protocols)\n functions = combine_dicts(spec0.functions, spec1.functions)\n custom_types = combine_dicts(spec0.custom_types, spec1.custom_types)\n constant_vars = combine_dicts(spec0.constant_vars, spec1.constant_vars)\n preset_vars = 
combine_dicts(spec0.preset_vars, spec1.preset_vars)\n config_vars = combine_dicts(spec0.config_vars, spec1.config_vars)\n ssz_dep_constants = combine_dicts(spec0.ssz_dep_constants, spec1.ssz_dep_constants)\n ssz_objects = combine_ssz_objects(spec0.ssz_objects, spec1.ssz_objects, custom_types)\n dataclasses = combine_dicts(spec0.dataclasses, spec1.dataclasses)\n return SpecObject(\n functions=functions,\n protocols=protocols,\n custom_types=custom_types,\n constant_vars=constant_vars,\n preset_vars=preset_vars,\n config_vars=config_vars,\n ssz_dep_constants=ssz_dep_constants,\n ssz_objects=ssz_objects,\n dataclasses=dataclasses,\n )\n\n\ndef parse_config_vars(conf: Dict[str, str]) -> Dict[str, str]:\n \"\"\"\n Parses a dict of basic str/int/list types into a dict for insertion into the spec code.\n \"\"\"\n out: Dict[str, str] = dict()\n for k, v in conf.items():\n if isinstance(v, str) and (v.startswith(\"0x\") or k == 'PRESET_BASE'):\n # Represent byte data with string, to avoid misinterpretation as big-endian int.\n # Everything is either byte data or an integer, with PRESET_BASE as one exception.\n out[k] = f\"'{v}'\"\n else:\n out[k] = str(int(v))\n return out\n\n\ndef load_preset(preset_files: Sequence[Path]) -> Dict[str, str]:\n \"\"\"\n Loads the a directory of preset files, merges the result into one preset.\n \"\"\"\n preset = {}\n for fork_file in preset_files:\n yaml = YAML(typ='base')\n fork_preset: dict = yaml.load(fork_file)\n if fork_preset is None: # for empty YAML files\n continue\n if not set(fork_preset.keys()).isdisjoint(preset.keys()):\n duplicates = set(fork_preset.keys()).intersection(set(preset.keys()))\n raise Exception(f\"duplicate config var(s) in preset files: {', '.join(duplicates)}\")\n preset.update(fork_preset)\n assert preset != {}\n return parse_config_vars(preset)\n\n\ndef load_config(config_path: Path) -> Dict[str, str]:\n \"\"\"\n Loads the given configuration file.\n \"\"\"\n yaml = YAML(typ='base')\n config_data = yaml.load(config_path)\n return parse_config_vars(config_data)\n\n\ndef _build_spec(preset_name: str, fork: str,\n source_files: Sequence[Path], preset_files: Sequence[Path], config_file: Path) -> str:\n preset = load_preset(preset_files)\n config = load_config(config_file)\n all_specs = [get_spec(spec, preset, config) for spec in source_files]\n\n spec_object = all_specs[0]\n for value in all_specs[1:]:\n spec_object = combine_spec_objects(spec_object, value)\n\n class_objects = {**spec_object.ssz_objects, **spec_object.dataclasses}\n dependency_order_class_objects(class_objects, spec_object.custom_types)\n\n return objects_to_spec(preset_name, spec_object, spec_builders[fork], class_objects)\n\n\nclass BuildTarget(NamedTuple):\n name: str\n preset_paths: List[Path]\n config_path: Path\n\n\nclass PySpecCommand(Command):\n \"\"\"Convert spec markdown files to a spec python file\"\"\"\n\n description = \"Convert spec markdown files to a spec python file\"\n\n spec_fork: str\n md_doc_paths: str\n parsed_md_doc_paths: List[str]\n build_targets: str\n parsed_build_targets: List[BuildTarget]\n out_dir: str\n\n # The format is (long option, short option, description).\n user_options = [\n ('spec-fork=', None, \"Spec fork to tag build with. 
Used to select md-docs defaults.\"),\n ('md-doc-paths=', None, \"List of paths of markdown files to build spec with\"),\n ('build-targets=', None, \"Names, directory paths of compile-time presets, and default config paths.\"),\n ('out-dir=', None, \"Output directory to write spec package to\")\n ]\n\n def initialize_options(self):\n \"\"\"Set default values for options.\"\"\"\n # Each user option must be listed here with their default value.\n self.spec_fork = PHASE0\n self.md_doc_paths = ''\n self.out_dir = 'pyspec_output'\n self.build_targets = \"\"\"\n minimal:presets/minimal:configs/minimal.yaml\n mainnet:presets/mainnet:configs/mainnet.yaml\n \"\"\"\n\n def finalize_options(self):\n \"\"\"Post-process options.\"\"\"\n if len(self.md_doc_paths) == 0:\n print(\"no paths were specified, using default markdown file paths for pyspec\"\n \" build (spec fork: %s)\" % self.spec_fork)\n if self.spec_fork in (PHASE0, ALTAIR, MERGE):\n self.md_doc_paths = \"\"\"\n specs/phase0/beacon-chain.md\n specs/phase0/fork-choice.md\n specs/phase0/validator.md\n specs/phase0/weak-subjectivity.md\n \"\"\"\n if self.spec_fork in (ALTAIR, MERGE):\n self.md_doc_paths += \"\"\"\n specs/altair/beacon-chain.md\n specs/altair/bls.md\n specs/altair/fork.md\n specs/altair/validator.md\n specs/altair/p2p-interface.md\n specs/altair/sync-protocol.md\n \"\"\"\n if self.spec_fork == MERGE:\n self.md_doc_paths += \"\"\"\n specs/merge/beacon-chain.md\n specs/merge/fork.md\n specs/merge/fork-choice.md\n specs/merge/validator.md\n \"\"\"\n if len(self.md_doc_paths) == 0:\n raise Exception('no markdown files specified, and spec fork \"%s\" is unknown', self.spec_fork)\n\n self.parsed_md_doc_paths = self.md_doc_paths.split()\n\n for filename in self.parsed_md_doc_paths:\n if not os.path.exists(filename):\n raise Exception('Pyspec markdown input file \"%s\" does not exist.' % filename)\n\n self.parsed_build_targets = []\n for target in self.build_targets.split():\n target = target.strip()\n data = target.split(':')\n if len(data) != 3:\n raise Exception('invalid target, expected \"name:preset_dir:config_file\" format, but got: %s' % target)\n name, preset_dir_path, config_path = data\n if any((c not in string.digits + string.ascii_letters) for c in name):\n raise Exception('invalid target name: \"%s\"' % name)\n if not os.path.exists(preset_dir_path):\n raise Exception('Preset dir \"%s\" does not exist' % preset_dir_path)\n _, _, preset_file_names = next(os.walk(preset_dir_path))\n preset_paths = [(Path(preset_dir_path) / name) for name in preset_file_names]\n\n if not os.path.exists(config_path):\n raise Exception('Config file \"%s\" does not exist' % config_path)\n self.parsed_build_targets.append(BuildTarget(name, preset_paths, Path(config_path)))\n\n def run(self):\n if not self.dry_run:\n dir_util.mkpath(self.out_dir)\n\n for (name, preset_paths, config_path) in self.parsed_build_targets:\n spec_str = spec_builders[self.spec_fork].build_spec(\n name, self.parsed_md_doc_paths, preset_paths, config_path)\n if self.dry_run:\n self.announce('dry run successfully prepared contents for spec.'\n f' out dir: \"{self.out_dir}\", spec fork: \"{self.spec_fork}\", build target: \"{name}\"')\n self.debug_print(spec_str)\n else:\n with open(os.path.join(self.out_dir, name+'.py'), 'w') as out:\n out.write(spec_str)\n\n if not self.dry_run:\n with open(os.path.join(self.out_dir, '__init__.py'), 'w') as out:\n # `mainnet` is the default spec.\n out.write(\"from . 
import mainnet as spec # noqa:F401\\n\")\n\n\nclass BuildPyCommand(build_py):\n \"\"\"Customize the build command to run the spec-builder on setup.py build\"\"\"\n\n def initialize_options(self):\n super(BuildPyCommand, self).initialize_options()\n\n def run_pyspec_cmd(self, spec_fork: str, **opts):\n cmd_obj: PySpecCommand = self.distribution.reinitialize_command(\"pyspec\")\n cmd_obj.spec_fork = spec_fork\n cmd_obj.out_dir = os.path.join(self.build_lib, 'eth2spec', spec_fork)\n for k, v in opts.items():\n setattr(cmd_obj, k, v)\n self.run_command('pyspec')\n\n def run(self):\n for spec_fork in spec_builders:\n self.run_pyspec_cmd(spec_fork=spec_fork)\n\n super(BuildPyCommand, self).run()\n\n\nclass PyspecDevCommand(Command):\n \"\"\"Build the markdown files in-place to their source location for testing.\"\"\"\n description = \"Build the markdown files in-place to their source location for testing.\"\n user_options = []\n\n def initialize_options(self):\n pass\n\n def finalize_options(self):\n pass\n\n def run_pyspec_cmd(self, spec_fork: str, **opts):\n cmd_obj: PySpecCommand = self.distribution.reinitialize_command(\"pyspec\")\n cmd_obj.spec_fork = spec_fork\n eth2spec_dir = convert_path(self.distribution.package_dir['eth2spec'])\n cmd_obj.out_dir = os.path.join(eth2spec_dir, spec_fork)\n for k, v in opts.items():\n setattr(cmd_obj, k, v)\n self.run_command('pyspec')\n\n def run(self):\n print(\"running build_py command\")\n for spec_fork in spec_builders:\n self.run_pyspec_cmd(spec_fork=spec_fork)\n\ncommands = {\n 'pyspec': PySpecCommand,\n 'build_py': BuildPyCommand,\n 'pyspecdev': PyspecDevCommand,\n}\n\nwith open(\"README.md\", \"rt\", encoding=\"utf8\") as f:\n readme = f.read()\n\n# How to use \"VERSION.txt\" file:\n# - dev branch contains \"X.Y.Z.dev\", where \"X.Y.Z\" is the target version to release dev into.\n# -> Changed as part of 'master' backport to 'dev'\n# - master branch contains \"X.Y.Z\", where \"X.Y.Z\" is the current version.\n# -> Changed as part of 'dev' release (or other branch) into 'master'\n# -> In case of a commit on master without git tag, target the next version\n# with \".postN\" (release candidate, numbered) suffixed.\n# See https://www.python.org/dev/peps/pep-0440/#public-version-identifiers\nwith open(os.path.join('tests', 'core', 'pyspec', 'eth2spec', 'VERSION.txt')) as f:\n spec_version = f.read().strip()\n\nsetup(\n name='eth2spec',\n version=spec_version,\n description=\"Eth2 spec, provided as Python package for tooling and testing\",\n long_description=readme,\n long_description_content_type=\"text/markdown\",\n author=\"ethereum\",\n url=\"https://github.com/ethereum/eth2.0-specs\",\n include_package_data=False,\n package_data={'configs': ['*.yaml'],\n 'presets': ['*.yaml'],\n 'specs': ['**/*.md'],\n 'eth2spec': ['VERSION.txt']},\n package_dir={\n \"eth2spec\": \"tests/core/pyspec/eth2spec\",\n \"configs\": \"configs\",\n \"presets\": \"presets\",\n \"specs\": \"specs\",\n },\n packages=find_packages(where='tests/core/pyspec') + ['configs', 'specs'],\n py_modules=[\"eth2spec\"],\n cmdclass=commands,\n python_requires=\">=3.8, <4\",\n extras_require={\n \"test\": [\"pytest>=4.4\", \"pytest-cov\", \"pytest-xdist\"],\n \"lint\": [\"flake8==3.7.7\", \"mypy==0.812\"],\n \"generator\": [\"python-snappy==0.5.4\"],\n },\n install_requires=[\n \"eth-utils>=1.3.0,<2\",\n \"eth-typing>=2.1.0,<3.0.0\",\n \"pycryptodome==3.9.4\",\n \"py_ecc==5.2.0\",\n \"milagro_bls_binding==1.6.3\",\n \"dataclasses==0.6\",\n \"remerkleable==0.1.24\",\n RUAMEL_YAML_VERSION,\n 
\"lru-dict==1.1.6\",\n MARKO_VERSION,\n ]\n)\n",
"path": "setup.py"
}
] | diff --git a/presets/mainnet/altair.yaml b/presets/mainnet/altair.yaml
index 9a17b78032..21e3cc3285 100644
--- a/presets/mainnet/altair.yaml
+++ b/presets/mainnet/altair.yaml
@@ -22,3 +22,7 @@ EPOCHS_PER_SYNC_COMMITTEE_PERIOD: 256
# ---------------------------------------------------------------
# 1
MIN_SYNC_COMMITTEE_PARTICIPANTS: 1
+# SLOTS_PER_EPOCH * EPOCHS_PER_SYNC_COMMITTEE_PERIOD (= 32 * 256)
+UPDATE_TIMEOUT: 8192
+# SLOTS_PER_EPOCH * EPOCHS_PER_SYNC_COMMITTEE_PERIOD // 2 (= 32 * 256 // 2)
+SAFETY_THRESHOLD_PERIOD: 4096
diff --git a/presets/minimal/altair.yaml b/presets/minimal/altair.yaml
index 88d78bea36..7cdbd58ea7 100644
--- a/presets/minimal/altair.yaml
+++ b/presets/minimal/altair.yaml
@@ -22,3 +22,7 @@ EPOCHS_PER_SYNC_COMMITTEE_PERIOD: 8
# ---------------------------------------------------------------
# 1
MIN_SYNC_COMMITTEE_PARTICIPANTS: 1
+# SLOTS_PER_EPOCH * EPOCHS_PER_SYNC_COMMITTEE_PERIOD (= 8 * 8)
+UPDATE_TIMEOUT: 64
+# SLOTS_PER_EPOCH * EPOCHS_PER_SYNC_COMMITTEE_PERIOD // 2 (= 8 * 8 // 2)
+SAFETY_THRESHOLD_PERIOD: 32
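The new preset values above are derived from constants already in the files; a quick sanity check of the arithmetic stated in the hunk comments (plain Python, values copied from those comments — not part of the original diff):

```python
# Sanity check of the derived preset values added above.
def derived_values(slots_per_epoch, epochs_per_sync_committee_period):
    update_timeout = slots_per_epoch * epochs_per_sync_committee_period
    safety_threshold_period = update_timeout // 2
    return update_timeout, safety_threshold_period

assert derived_values(32, 256) == (8192, 4096)  # mainnet preset
assert derived_values(8, 8) == (64, 32)         # minimal preset
```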
diff --git a/setup.py b/setup.py
index 0ced87be2e..7b74fc1557 100644
--- a/setup.py
+++ b/setup.py
@@ -683,6 +683,7 @@ def combine_dicts(old_dict: Dict[str, T], new_dict: Dict[str, T]) -> Dict[str, T
'uint8', 'uint16', 'uint32', 'uint64', 'uint128', 'uint256',
'bytes', 'byte', 'ByteList', 'ByteVector',
'Dict', 'dict', 'field', 'ceillog2', 'floorlog2', 'Set',
+ 'Optional',
]
diff --git a/specs/altair/sync-protocol.md b/specs/altair/sync-protocol.md
index 0af20cbca1..ce7ae62523 100644
--- a/specs/altair/sync-protocol.md
+++ b/specs/altair/sync-protocol.md
@@ -52,8 +52,8 @@ uses sync committees introduced in [this beacon chain extension](./beacon-chain.
| Name | Value | Notes |
| - | - | - |
| `MIN_SYNC_COMMITTEE_PARTICIPANTS` | `1` | |
-| `SAFETY_THRESHOLD_PERIOD` | `SLOTS_PER_EPOCH * EPOCHS_PER_SYNC_COMMITTEE_PERIOD` | ~13.6 hours |
| `UPDATE_TIMEOUT` | `SLOTS_PER_EPOCH * EPOCHS_PER_SYNC_COMMITTEE_PERIOD` | ~27.3 hours |
+| `SAFETY_THRESHOLD_PERIOD` | `SLOTS_PER_EPOCH * EPOCHS_PER_SYNC_COMMITTEE_PERIOD // 2` | ~13.6 hours |
## Containers
@@ -79,6 +79,7 @@ class LightClientUpdate(Container):
### `LightClientStore`
```python
+@dataclass
class LightClientStore(object):
# Beacon block header that is finalized
finalized_header: BeaconBlockHeader
@@ -119,7 +120,7 @@ def get_active_header(update: LightClientUpdate) -> BeaconBlockHeader:
### `get_safety_threshold`
```python
-def get_safety_threshold(store: LightClientStore):
+def get_safety_threshold(store: LightClientStore) -> uint64:
return max(
store.previous_max_active_participants,
store.current_max_active_participants
@@ -130,10 +131,10 @@ def get_safety_threshold(store: LightClientStore):
A light client maintains its state in a `store` object of type `LightClientStore` and receives `update` objects of type `LightClientUpdate`. Every `update` triggers `process_light_client_update(store, update, current_slot)` where `current_slot` is the current slot based on some local clock. `process_slot` is processed every time the current slot increments.
-### `process_slot`
+#### `process_slot`
```python
-def process_slot(store: LightClientStore, current_slot: Slot):
+def process_slot_for_light_client_store(store: LightClientStore, current_slot: Slot) -> None:
if current_slot % SAFETY_THRESHOLD_PERIOD == 0:
store.previous_max_active_participants = store.current_max_active_participants
store.current_max_active_participants = 0
@@ -216,13 +217,16 @@ def process_light_client_update(store: LightClientStore,
validate_light_client_update(store, update, current_slot, genesis_validators_root)
# Update the best update in case we have to force-update to it if the timeout elapses
- if sum(update.sync_committee_bits) > sum(store.best_valid_update.sync_committee_bits):
+ if (
+ store.best_valid_update is None
+ or sum(update.sync_committee_bits) > sum(store.best_valid_update.sync_committee_bits)
+ ):
store.best_valid_update = update
- # Track the maximum numebr of active participants in the committee signatures
+ # Track the maximum number of active participants in the committee signatures
store.current_max_active_participants = max(
- store.current_max_active_participants,
- update.sync_committee_bits.count(1)
+ store.current_max_active_participants,
+ update.sync_committee_bits.count(1),
)
# Update the optimistic header
@@ -240,7 +244,10 @@ def process_light_client_update(store: LightClientStore,
# Normal update through 2/3 threshold
apply_light_client_update(store, update)
store.best_valid_update = None
- elif current_slot > store.finalized_header.slot + UPDATE_TIMEOUT:
+ elif (
+ current_slot > store.finalized_header.slot + UPDATE_TIMEOUT
+ and store.best_valid_update is not None
+ ):
# Forced best update when the update timeout has elapsed
apply_light_client_update(store, store.best_valid_update)
store.best_valid_update = None
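The last two hunks above guard `store.best_valid_update` against being unset before it is compared or applied. A minimal runnable sketch of the resulting control flow — the dataclasses here are simplified stand-ins for the spec's SSZ containers, not the real types:

```python
from dataclasses import dataclass, field
from typing import List, Optional


@dataclass
class LightClientUpdate:
    sync_committee_bits: List[int] = field(default_factory=list)


@dataclass
class LightClientStore:
    best_valid_update: Optional[LightClientUpdate] = None
    current_max_active_participants: int = 0


def track_update(store: LightClientStore, update: LightClientUpdate) -> None:
    # Keep the best update in case we must force-update on timeout;
    # the `is None` check is what the hunk above introduces.
    if (
        store.best_valid_update is None
        or sum(update.sync_committee_bits)
        > sum(store.best_valid_update.sync_committee_bits)
    ):
        store.best_valid_update = update
    # Track the maximum number of active participants.
    store.current_max_active_participants = max(
        store.current_max_active_participants,
        update.sync_committee_bits.count(1),
    )


store = LightClientStore()
track_update(store, LightClientUpdate(sync_committee_bits=[1, 0, 1]))
assert store.best_valid_update is not None
assert store.current_max_active_participants == 2
```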
|
cloud-custodian__cloud-custodian-3410 | Missing required parameter in input: "Enable" when using c7n-guardian
I am running the command:
```c7n-guardian enable --config guard-duty-accounts.yaml --master <account1_id>```
with the following config:
```
accounts:
- name: account1_name
email: [email protected]
account_id: "accountid1"
role: "arn:aws:iam::accountid1:role/CustodianGuardDuty"
- name: account2_name
email: [email protected]
account_id: "accountid2"
role: "arn:aws:iam::accountid2:role/CustodianGuardDuty"
```
I am using the following versions:
"C7NVersion": "0.8.32.1",
"C7NOrgVersion": "0.5.0",
"C7NGuardianVersion": "0.3"
| [
{
"content": "# Copyright 2017 Capital One Services, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport datetime\nimport logging\nimport operator\n\nimport boto3\nfrom botocore.exceptions import ClientError\nfrom concurrent.futures import as_completed\nimport click\nfrom tabulate import tabulate\n\nfrom c7n.credentials import assumed_session, SessionFactory\nfrom c7n.utils import format_event, chunks\n\nfrom c7n_org.cli import init, filter_accounts, CONFIG_SCHEMA, WORKER_COUNT\n\nlog = logging.getLogger('c7n-guardian')\n\n\n# make email required in org schema\nCONFIG_SCHEMA['definitions']['account']['properties']['email'] = {'type': 'string'}\nfor el in CONFIG_SCHEMA['definitions']['account']['anyOf']:\n el['required'].append('email')\n\n\[email protected]()\ndef cli():\n \"\"\"Automate Guard Duty Setup.\"\"\"\n\n\[email protected]()\[email protected]('-c', '--config',\n required=True, help=\"Accounts config file\", type=click.Path())\[email protected]('-t', '--tags', multiple=True, default=None)\[email protected]('-a', '--accounts', multiple=True, default=None)\[email protected]('--master', help='Master account id or name')\[email protected]('--debug', help='Run single-threaded', is_flag=True)\[email protected]('--region', default='us-east-1')\ndef report(config, tags, accounts, master, debug, region):\n \"\"\"report on guard duty enablement by account\"\"\"\n accounts_config, master_info, executor = guardian_init(\n config, debug, master, accounts, tags)\n\n session = get_session(\n master_info.get('role'), 'c7n-guardian',\n master_info.get('profile'),\n region)\n\n client = session.client('guardduty')\n detector_id = get_or_create_detector_id(client)\n\n members = {m['AccountId']: m for m in\n client.list_members(DetectorId=detector_id).get('Members')}\n\n accounts_report = []\n for a in accounts_config['accounts']:\n ar = dict(a)\n accounts_report.append(ar)\n ar.pop('tags', None)\n ar.pop('role')\n ar.pop('regions', None)\n if a['account_id'] not in members:\n ar['member'] = False\n ar['status'] = None\n ar['invited'] = None\n ar['updated'] = datetime.datetime.now().isoformat()\n continue\n m = members[a['account_id']]\n ar['status'] = m['RelationshipStatus']\n ar['member'] = True\n ar['joined'] = m['InvitedAt']\n ar['updated'] = m['UpdatedAt']\n\n accounts_report.sort(key=operator.itemgetter('updated'), reverse=True)\n print(tabulate(accounts_report, headers=('keys')))\n\n\[email protected]()\[email protected]('-c', '--config',\n required=True, help=\"Accounts config file\", type=click.Path())\[email protected]('-t', '--tags', multiple=True, default=None)\[email protected]('-a', '--accounts', multiple=True, default=None)\[email protected]('--master', help='Master account id or name')\[email protected]('--debug', help='Run single-threaded', is_flag=True)\[email protected]('--suspend', help='Suspend monitoring in master', is_flag=True)\[email protected]('--disable-detector', help='Disable detector in member account',\n is_flag=True)\[email protected]('--delete-detector', 
help='Disable detector in member account',\n is_flag=True)\[email protected]('--dissociate', help='Disassociate member account',\n is_flag=True)\[email protected]('--region')\ndef disable(config, tags, accounts, master, debug,\n suspend, disable_detector, delete_detector, dissociate, region):\n \"\"\"suspend guard duty in the given accounts.\"\"\"\n accounts_config, master_info, executor = guardian_init(\n config, debug, master, accounts, tags)\n\n if sum(map(int, (suspend, disable_detector, dissociate))) != 1:\n raise ValueError((\n \"One and only of suspend, disable-detector, dissociate\"\n \"can be specified.\"))\n\n master_session = get_session(\n master_info['role'], 'c7n-guardian',\n master_info.get('profile'), region)\n master_client = master_session.client('guardduty')\n detector_id = get_or_create_detector_id(master_client)\n\n if suspend:\n unprocessed = master_client.stop_monitoring_members(\n DetectorId=detector_id,\n AccountIds=[a['account_id'] for a in accounts_config['accounts']]\n ).get('UnprocessedAccounts', ())\n\n if unprocessed:\n log.warning(\n \"Following accounts where unprocessed\\n %s\",\n format_event(unprocessed))\n log.info(\"Stopped monitoring %d accounts in master\",\n len(accounts_config['accounts']))\n return\n\n if dissociate:\n master_client.disassociate_members(\n DetectorId=detector_id,\n AccountIds=[a['account_id'] for a in accounts_config['accounts']])\n\n # Seems like there's a couple of ways to disable an account\n # delete the detector (member), disable the detector (master or member),\n # or disassociate members, or from member disassociate from master.\n for a in accounts_config['accounts']:\n member_session = get_session(\n a['role'], 'c7n-guardian',\n a.get('profile'), region)\n\n member_client = member_session.client('guardduty')\n m_detector_id = get_or_create_detector_id(member_client)\n if disable_detector:\n member_client.update_detector(\n DetectorId=m_detector_id, Enable=False)\n log.info(\"Disabled detector in account:%s\", a['name'])\n if dissociate:\n try:\n log.info(\"Disassociated member account:%s\", a['name'])\n result = member_client.disassociate_from_master_account(\n DetectorId=m_detector_id)\n log.info(\"Result %s\", format_event(result))\n except ClientError as e:\n if e.response['Error']['Code'] == 'InvalidInputException':\n continue\n if delete_detector:\n member_client.delete_detector(DetectorId=m_detector_id)\n log.info(\"Deleted detector in account:%s\", a['name'])\n\n\ndef get_session(role, session_name, profile, region):\n if role:\n return assumed_session(role, session_name, region=region)\n else:\n return SessionFactory(region, profile)()\n\n\ndef expand_regions(regions, partition='aws'):\n if 'all' in regions:\n regions = boto3.Session().get_available_regions('ec2')\n return regions\n\n\[email protected]()\[email protected]('-c', '--config',\n required=True, help=\"Accounts config file\", type=click.Path())\[email protected]('--master', help='Master account id or name')\[email protected]('-a', '--accounts', multiple=True, default=None)\[email protected]('-t', '--tags', multiple=True, default=None)\[email protected]('--debug', help='Run single-threaded', is_flag=True)\[email protected]('--message', help='Welcome Message for member accounts')\[email protected](\n '-r', '--region',\n default=['all'], help='Region to enable (default: all)',\n multiple=True)\ndef enable(config, master, tags, accounts, debug, message, region):\n \"\"\"enable guard duty on a set of accounts\"\"\"\n accounts_config, master_info, executor = 
guardian_init(\n config, debug, master, accounts, tags)\n regions = expand_regions(region)\n for r in regions:\n log.info(\"Processing Region:%s\", r)\n enable_region(master_info, accounts_config, executor, message, r)\n\n\ndef enable_region(master_info, accounts_config, executor, message, region):\n master_session = get_session(\n master_info.get('role'), 'c7n-guardian',\n master_info.get('profile'),\n region=region)\n\n master_client = master_session.client('guardduty')\n detector_id = get_or_create_detector_id(master_client)\n\n results = master_client.get_paginator(\n 'list_members').paginate(DetectorId=detector_id, OnlyAssociated=\"FALSE\")\n extant_members = results.build_full_result().get('Members', ())\n extant_ids = {m['AccountId'] for m in extant_members}\n\n # Find active members\n active_ids = {m['AccountId'] for m in extant_members\n if m['RelationshipStatus'] == 'Enabled'}\n # Find invited members\n invited_ids = {m['AccountId'] for m in extant_members\n if m['RelationshipStatus'] == 'Invited'}\n\n # Find extant members not currently enabled\n suspended_ids = {m['AccountId'] for m in extant_members\n if m['RelationshipStatus'] == 'Disabled'}\n # Filter by accounts under consideration per config and cli flags\n suspended_ids = {a['account_id'] for a in accounts_config['accounts']\n if a['account_id'] in suspended_ids}\n\n if suspended_ids:\n unprocessed = master_client.start_monitoring_members(\n DetectorId=detector_id,\n AccountIds=list(suspended_ids)).get('UnprocessedAccounts')\n if unprocessed:\n log.warning(\n \"Region: %s Unprocessed accounts on re-start monitoring %s\",\n region, format_event(unprocessed))\n log.info(\"Region: %s Restarted monitoring on %d accounts\",\n region, len(suspended_ids))\n\n members = [{'AccountId': account['account_id'], 'Email': account['email']}\n for account in accounts_config['accounts']\n if account['account_id'] not in extant_ids]\n\n if not members:\n if not suspended_ids and not invited_ids:\n log.info(\"Region:%s All accounts already enabled\", region)\n return list(active_ids)\n\n if (len(members) + len(extant_ids)) > 1000:\n raise ValueError(\n (\"Region:%s Guard Duty only supports \"\n \"1000 member accounts per master account\") % (region))\n\n log.info(\n \"Region:%s Enrolling %d accounts in guard duty\", region, len(members))\n\n unprocessed = []\n for account_set in chunks(members, 25):\n unprocessed.extend(master_client.create_members(\n DetectorId=detector_id,\n AccountDetails=account_set).get('UnprocessedAccounts', []))\n if unprocessed:\n log.warning(\n \"Region:%s accounts where unprocessed - member create\\n %s\",\n region, format_event(unprocessed))\n\n log.info(\"Region:%s Inviting %d member accounts\", region, len(members))\n unprocessed = []\n for account_set in chunks(\n [m for m in members if not m['AccountId'] in invited_ids], 25):\n params = {'AccountIds': [m['AccountId'] for m in account_set],\n 'DetectorId': detector_id}\n if message:\n params['Message'] = message\n unprocessed.extend(master_client.invite_members(\n **params).get('UnprocessedAccounts', []))\n if unprocessed:\n log.warning(\n \"Region:%s accounts where unprocessed invite-members\\n %s\",\n region, format_event(unprocessed))\n\n members = [{'AccountId': account['account_id'], 'Email': account['email']}\n for account in accounts_config['accounts']\n if account['account_id'] not in active_ids]\n\n log.info(\"Region:%s Accepting %d invitations in members\", region, len(members))\n\n with executor(max_workers=WORKER_COUNT) as w:\n futures = {}\n for a 
in accounts_config['accounts']:\n if a == master_info:\n continue\n if a['account_id'] in active_ids:\n continue\n futures[w.submit(enable_account, a, master_info['account_id'], region)] = a\n\n for f in as_completed(futures):\n a = futures[f]\n if f.exception():\n log.error(\"Region:%s Error processing account:%s error:%s\",\n region, a['name'], f.exception())\n continue\n if f.result():\n log.info('Region:%s Enabled guard duty on account:%s',\n region, a['name'])\n return members\n\n\ndef enable_account(account, master_account_id, region):\n member_session = get_session(\n account.get('role'), 'c7n-guardian',\n profile=account.get('profile'),\n region=region)\n member_client = member_session.client('guardduty')\n m_detector_id = get_or_create_detector_id(member_client)\n all_invitations = member_client.list_invitations().get('Invitations', [])\n invitations = [\n i for i in all_invitations\n if i['AccountId'] == master_account_id]\n invitations.sort(key=operator.itemgetter('InvitedAt'))\n if not invitations:\n log.warning(\n \"Region:%s No guard duty invitation found account:%s id:%s aid:%s\",\n region, account['name'], m_detector_id, account['account_id'])\n return\n\n member_client.accept_invitation(\n DetectorId=m_detector_id,\n InvitationId=invitations[-1]['InvitationId'],\n MasterId=master_account_id)\n return True\n\n\ndef get_or_create_detector_id(client):\n detectors = client.list_detectors().get('DetectorIds')\n if detectors:\n return detectors[0]\n else:\n return client.create_detector().get('DetectorId')\n\n\ndef get_master_info(accounts_config, master):\n master_info = None\n for a in accounts_config['accounts']:\n if a['name'] == master:\n master_info = a\n break\n if a['account_id'] == master:\n master_info = a\n break\n\n if master_info is None:\n raise ValueError(\"Master account: %s not found in accounts config\" % (\n master))\n return master_info\n\n\ndef guardian_init(config, debug, master, accounts, tags):\n accounts_config, custodian_config, executor = init(\n config, None, debug, False, None, None, None, None)\n master_info = get_master_info(accounts_config, master)\n filter_accounts(accounts_config, tags, accounts, not_accounts=[master_info['name']])\n return accounts_config, master_info, executor\n\n# AccountSet\n#\n# get master invitation\n# get detectors\n# delete detector\n# disassociate from master\n",
"path": "tools/c7n_guardian/c7n_guardian/cli.py"
}
] | [
{
"content": "# Copyright 2017 Capital One Services, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport datetime\nimport logging\nimport operator\n\nimport boto3\nfrom botocore.exceptions import ClientError\nfrom concurrent.futures import as_completed\nimport click\nfrom tabulate import tabulate\n\nfrom c7n.credentials import assumed_session, SessionFactory\nfrom c7n.utils import format_event, chunks\n\nfrom c7n_org.cli import init, filter_accounts, CONFIG_SCHEMA, WORKER_COUNT\n\nlog = logging.getLogger('c7n-guardian')\n\n\n# make email required in org schema\nCONFIG_SCHEMA['definitions']['account']['properties']['email'] = {'type': 'string'}\nfor el in CONFIG_SCHEMA['definitions']['account']['anyOf']:\n el['required'].append('email')\n\n\[email protected]()\ndef cli():\n \"\"\"Automate Guard Duty Setup.\"\"\"\n\n\[email protected]()\[email protected]('-c', '--config',\n required=True, help=\"Accounts config file\", type=click.Path())\[email protected]('-t', '--tags', multiple=True, default=None)\[email protected]('-a', '--accounts', multiple=True, default=None)\[email protected]('--master', help='Master account id or name')\[email protected]('--debug', help='Run single-threaded', is_flag=True)\[email protected]('--region', default='us-east-1')\ndef report(config, tags, accounts, master, debug, region):\n \"\"\"report on guard duty enablement by account\"\"\"\n accounts_config, master_info, executor = guardian_init(\n config, debug, master, accounts, tags)\n\n session = get_session(\n master_info.get('role'), 'c7n-guardian',\n master_info.get('profile'),\n region)\n\n client = session.client('guardduty')\n detector_id = get_or_create_detector_id(client)\n\n members = {m['AccountId']: m for m in\n client.list_members(DetectorId=detector_id).get('Members')}\n\n accounts_report = []\n for a in accounts_config['accounts']:\n ar = dict(a)\n accounts_report.append(ar)\n ar.pop('tags', None)\n ar.pop('role')\n ar.pop('regions', None)\n if a['account_id'] not in members:\n ar['member'] = False\n ar['status'] = None\n ar['invited'] = None\n ar['updated'] = datetime.datetime.now().isoformat()\n continue\n m = members[a['account_id']]\n ar['status'] = m['RelationshipStatus']\n ar['member'] = True\n ar['joined'] = m['InvitedAt']\n ar['updated'] = m['UpdatedAt']\n\n accounts_report.sort(key=operator.itemgetter('updated'), reverse=True)\n print(tabulate(accounts_report, headers=('keys')))\n\n\[email protected]()\[email protected]('-c', '--config',\n required=True, help=\"Accounts config file\", type=click.Path())\[email protected]('-t', '--tags', multiple=True, default=None)\[email protected]('-a', '--accounts', multiple=True, default=None)\[email protected]('--master', help='Master account id or name')\[email protected]('--debug', help='Run single-threaded', is_flag=True)\[email protected]('--suspend', help='Suspend monitoring in master', is_flag=True)\[email protected]('--disable-detector', help='Disable detector in member account',\n is_flag=True)\[email protected]('--delete-detector', 
help='Disable detector in member account',\n is_flag=True)\[email protected]('--dissociate', help='Disassociate member account',\n is_flag=True)\[email protected]('--region')\ndef disable(config, tags, accounts, master, debug,\n suspend, disable_detector, delete_detector, dissociate, region):\n \"\"\"suspend guard duty in the given accounts.\"\"\"\n accounts_config, master_info, executor = guardian_init(\n config, debug, master, accounts, tags)\n\n if sum(map(int, (suspend, disable_detector, dissociate))) != 1:\n raise ValueError((\n \"One and only of suspend, disable-detector, dissociate\"\n \"can be specified.\"))\n\n master_session = get_session(\n master_info['role'], 'c7n-guardian',\n master_info.get('profile'), region)\n master_client = master_session.client('guardduty')\n detector_id = get_or_create_detector_id(master_client)\n\n if suspend:\n unprocessed = master_client.stop_monitoring_members(\n DetectorId=detector_id,\n AccountIds=[a['account_id'] for a in accounts_config['accounts']]\n ).get('UnprocessedAccounts', ())\n\n if unprocessed:\n log.warning(\n \"Following accounts where unprocessed\\n %s\",\n format_event(unprocessed))\n log.info(\"Stopped monitoring %d accounts in master\",\n len(accounts_config['accounts']))\n return\n\n if dissociate:\n master_client.disassociate_members(\n DetectorId=detector_id,\n AccountIds=[a['account_id'] for a in accounts_config['accounts']])\n\n # Seems like there's a couple of ways to disable an account\n # delete the detector (member), disable the detector (master or member),\n # or disassociate members, or from member disassociate from master.\n for a in accounts_config['accounts']:\n member_session = get_session(\n a['role'], 'c7n-guardian',\n a.get('profile'), region)\n\n member_client = member_session.client('guardduty')\n m_detector_id = get_or_create_detector_id(member_client)\n if disable_detector:\n member_client.update_detector(\n DetectorId=m_detector_id, Enable=False)\n log.info(\"Disabled detector in account:%s\", a['name'])\n if dissociate:\n try:\n log.info(\"Disassociated member account:%s\", a['name'])\n result = member_client.disassociate_from_master_account(\n DetectorId=m_detector_id)\n log.info(\"Result %s\", format_event(result))\n except ClientError as e:\n if e.response['Error']['Code'] == 'InvalidInputException':\n continue\n if delete_detector:\n member_client.delete_detector(DetectorId=m_detector_id)\n log.info(\"Deleted detector in account:%s\", a['name'])\n\n\ndef get_session(role, session_name, profile, region):\n if role:\n return assumed_session(role, session_name, region=region)\n else:\n return SessionFactory(region, profile)()\n\n\ndef expand_regions(regions, partition='aws'):\n if 'all' in regions:\n regions = boto3.Session().get_available_regions('ec2')\n return regions\n\n\[email protected]()\[email protected]('-c', '--config',\n required=True, help=\"Accounts config file\", type=click.Path())\[email protected]('--master', help='Master account id or name')\[email protected]('-a', '--accounts', multiple=True, default=None)\[email protected]('-t', '--tags', multiple=True, default=None)\[email protected]('--debug', help='Run single-threaded', is_flag=True)\[email protected]('--message', help='Welcome Message for member accounts')\[email protected](\n '-r', '--region',\n default=['all'], help='Region to enable (default: all)',\n multiple=True)\ndef enable(config, master, tags, accounts, debug, message, region):\n \"\"\"enable guard duty on a set of accounts\"\"\"\n accounts_config, master_info, executor = 
guardian_init(\n config, debug, master, accounts, tags)\n regions = expand_regions(region)\n for r in regions:\n log.info(\"Processing Region:%s\", r)\n enable_region(master_info, accounts_config, executor, message, r)\n\n\ndef enable_region(master_info, accounts_config, executor, message, region):\n master_session = get_session(\n master_info.get('role'), 'c7n-guardian',\n master_info.get('profile'),\n region=region)\n\n master_client = master_session.client('guardduty')\n detector_id = get_or_create_detector_id(master_client)\n\n results = master_client.get_paginator(\n 'list_members').paginate(DetectorId=detector_id, OnlyAssociated=\"FALSE\")\n extant_members = results.build_full_result().get('Members', ())\n extant_ids = {m['AccountId'] for m in extant_members}\n\n # Find active members\n active_ids = {m['AccountId'] for m in extant_members\n if m['RelationshipStatus'] == 'Enabled'}\n # Find invited members\n invited_ids = {m['AccountId'] for m in extant_members\n if m['RelationshipStatus'] == 'Invited'}\n\n # Find extant members not currently enabled\n suspended_ids = {m['AccountId'] for m in extant_members\n if m['RelationshipStatus'] == 'Disabled'}\n # Filter by accounts under consideration per config and cli flags\n suspended_ids = {a['account_id'] for a in accounts_config['accounts']\n if a['account_id'] in suspended_ids}\n\n if suspended_ids:\n unprocessed = master_client.start_monitoring_members(\n DetectorId=detector_id,\n AccountIds=list(suspended_ids)).get('UnprocessedAccounts')\n if unprocessed:\n log.warning(\n \"Region: %s Unprocessed accounts on re-start monitoring %s\",\n region, format_event(unprocessed))\n log.info(\"Region: %s Restarted monitoring on %d accounts\",\n region, len(suspended_ids))\n\n members = [{'AccountId': account['account_id'], 'Email': account['email']}\n for account in accounts_config['accounts']\n if account['account_id'] not in extant_ids]\n\n if not members:\n if not suspended_ids and not invited_ids:\n log.info(\"Region:%s All accounts already enabled\", region)\n return list(active_ids)\n\n if (len(members) + len(extant_ids)) > 1000:\n raise ValueError(\n (\"Region:%s Guard Duty only supports \"\n \"1000 member accounts per master account\") % (region))\n\n log.info(\n \"Region:%s Enrolling %d accounts in guard duty\", region, len(members))\n\n unprocessed = []\n for account_set in chunks(members, 25):\n unprocessed.extend(master_client.create_members(\n DetectorId=detector_id,\n AccountDetails=account_set).get('UnprocessedAccounts', []))\n if unprocessed:\n log.warning(\n \"Region:%s accounts where unprocessed - member create\\n %s\",\n region, format_event(unprocessed))\n\n log.info(\"Region:%s Inviting %d member accounts\", region, len(members))\n unprocessed = []\n for account_set in chunks(\n [m for m in members if not m['AccountId'] in invited_ids], 25):\n params = {'AccountIds': [m['AccountId'] for m in account_set],\n 'DetectorId': detector_id}\n if message:\n params['Message'] = message\n unprocessed.extend(master_client.invite_members(\n **params).get('UnprocessedAccounts', []))\n if unprocessed:\n log.warning(\n \"Region:%s accounts where unprocessed invite-members\\n %s\",\n region, format_event(unprocessed))\n\n members = [{'AccountId': account['account_id'], 'Email': account['email']}\n for account in accounts_config['accounts']\n if account['account_id'] not in active_ids]\n\n log.info(\"Region:%s Accepting %d invitations in members\", region, len(members))\n\n with executor(max_workers=WORKER_COUNT) as w:\n futures = {}\n for a 
in accounts_config['accounts']:\n if a == master_info:\n continue\n if a['account_id'] in active_ids:\n continue\n futures[w.submit(enable_account, a, master_info['account_id'], region)] = a\n\n for f in as_completed(futures):\n a = futures[f]\n if f.exception():\n log.error(\"Region:%s Error processing account:%s error:%s\",\n region, a['name'], f.exception())\n continue\n if f.result():\n log.info('Region:%s Enabled guard duty on account:%s',\n region, a['name'])\n return members\n\n\ndef enable_account(account, master_account_id, region):\n member_session = get_session(\n account.get('role'), 'c7n-guardian',\n profile=account.get('profile'),\n region=region)\n member_client = member_session.client('guardduty')\n m_detector_id = get_or_create_detector_id(member_client)\n all_invitations = member_client.list_invitations().get('Invitations', [])\n invitations = [\n i for i in all_invitations\n if i['AccountId'] == master_account_id]\n invitations.sort(key=operator.itemgetter('InvitedAt'))\n if not invitations:\n log.warning(\n \"Region:%s No guard duty invitation found account:%s id:%s aid:%s\",\n region, account['name'], m_detector_id, account['account_id'])\n return\n\n member_client.accept_invitation(\n DetectorId=m_detector_id,\n InvitationId=invitations[-1]['InvitationId'],\n MasterId=master_account_id)\n return True\n\n\ndef get_or_create_detector_id(client):\n detectors = client.list_detectors().get('DetectorIds')\n if detectors:\n return detectors[0]\n else:\n return client.create_detector(Enable=True).get('DetectorId')\n\n\ndef get_master_info(accounts_config, master):\n master_info = None\n for a in accounts_config['accounts']:\n if a['name'] == master:\n master_info = a\n break\n if a['account_id'] == master:\n master_info = a\n break\n\n if master_info is None:\n raise ValueError(\"Master account: %s not found in accounts config\" % (\n master))\n return master_info\n\n\ndef guardian_init(config, debug, master, accounts, tags):\n accounts_config, custodian_config, executor = init(\n config, None, debug, False, None, None, None, None)\n master_info = get_master_info(accounts_config, master)\n filter_accounts(accounts_config, tags, accounts, not_accounts=[master_info['name']])\n return accounts_config, master_info, executor\n\n# AccountSet\n#\n# get master invitation\n# get detectors\n# delete detector\n# disassociate from master\n",
"path": "tools/c7n_guardian/c7n_guardian/cli.py"
}
] | diff --git a/tools/c7n_guardian/c7n_guardian/cli.py b/tools/c7n_guardian/c7n_guardian/cli.py
index ad5619bb3c2..b85d1220533 100644
--- a/tools/c7n_guardian/c7n_guardian/cli.py
+++ b/tools/c7n_guardian/c7n_guardian/cli.py
@@ -340,7 +340,7 @@ def get_or_create_detector_id(client):
if detectors:
return detectors[0]
else:
- return client.create_detector().get('DetectorId')
+ return client.create_detector(Enable=True).get('DetectorId')
def get_master_info(accounts_config, master):
|
arviz-devs__arviz-596 | Installing arviz breaks pymc3 installation
**Describe the bug**
Installing Arviz breaks a pymc3 installation, which is unfortunate because they're built to be compatible. After installation, importing pymc3 throws the following error.
> WARNING (theano.tensor.blas): Using NumPy C-API based implementation for BLAS functions.
The reason is that the arviz installation requires numpy==1.15 rather than numpy>=1.15. If you have numpy 1.16, installing arviz uninstalls it and re-installs 1.15. It's annoying to fix; I ended up having to scrap the whole virtual environment and start over.
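A minimal sketch of the requested packaging change, assuming the pin lives in arviz's `install_requires` (the actual requirements file is not shown in this issue, so the excerpt is illustrative):
```python
# hypothetical setup.py excerpt -- illustrative, not arviz's actual file
from setuptools import setup

setup(
    name="arviz",
    install_requires=[
        # "numpy==1.15",  # exact pin: pip uninstalls an existing numpy 1.16
        "numpy>=1.15",    # lower bound: any installed numpy >= 1.15 is kept
    ],
)
```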
**To Reproduce**
Install arviz if you have any version of numpy other than 1.15, then import pymc3.
**Expected behavior**
Do not force a downgrade of numpy.
| [
{
"content": "# pylint: disable=wildcard-import,invalid-name,wrong-import-position\n\"\"\"ArviZ is a library for exploratory analysis of Bayesian models.\"\"\"\n__version__ = \"0.3.2\"\n\nimport os\nimport logging\nfrom matplotlib.pyplot import style\n\n# add ArviZ's styles to matplotlib's styles\narviz_style_path = os.path.join(os.path.dirname(__file__), \"plots\", \"styles\")\nstyle.core.USER_LIBRARY_PATHS.append(arviz_style_path)\nstyle.core.reload_library()\n\n# Configure logging before importing arviz internals\n_log = logging.getLogger(\"arviz\")\n\nif not logging.root.handlers:\n handler = logging.StreamHandler()\n _log.setLevel(logging.INFO)\n _log.addHandler(handler)\n\nfrom .data import *\nfrom .plots import *\nfrom .stats import *\n",
"path": "arviz/__init__.py"
}
] | [
{
"content": "# pylint: disable=wildcard-import,invalid-name,wrong-import-position\n\"\"\"ArviZ is a library for exploratory analysis of Bayesian models.\"\"\"\n__version__ = \"0.3.3\"\n\nimport os\nimport logging\nfrom matplotlib.pyplot import style\n\n# add ArviZ's styles to matplotlib's styles\narviz_style_path = os.path.join(os.path.dirname(__file__), \"plots\", \"styles\")\nstyle.core.USER_LIBRARY_PATHS.append(arviz_style_path)\nstyle.core.reload_library()\n\n# Configure logging before importing arviz internals\n_log = logging.getLogger(\"arviz\")\n\nif not logging.root.handlers:\n handler = logging.StreamHandler()\n _log.setLevel(logging.INFO)\n _log.addHandler(handler)\n\nfrom .data import *\nfrom .plots import *\nfrom .stats import *\n",
"path": "arviz/__init__.py"
}
] | diff --git a/arviz/__init__.py b/arviz/__init__.py
index b9a909cd9c..3b1765c9f8 100644
--- a/arviz/__init__.py
+++ b/arviz/__init__.py
@@ -1,6 +1,6 @@
# pylint: disable=wildcard-import,invalid-name,wrong-import-position
"""ArviZ is a library for exploratory analysis of Bayesian models."""
-__version__ = "0.3.2"
+__version__ = "0.3.3"
import os
import logging
|
liqd__a4-meinberlin-539 | use list markup for lists of elements
Part of the BITV-Test: "1.3.1b HTML-Strukturelemente für Listen" ("HTML structural elements for lists"); a sketch of the intended markup follows the checklist below.
----
- [x] list of blueprints
- [x] list of projects
unsure:
- [ ] list of questions in poll contents
- [ ] list of choices in poll contents
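A sketch of the markup change this asks for, written as a Django template snippet; the context variable `projects` and the `u-list-reset` class (a small `list-style`/`margin`/`padding` reset so the semantic list keeps the current tile layout) are illustrative assumptions:
```html
<!-- before: items are divs, so assistive technology sees no list structure -->
<div class="l-tiles-4">
  {% for project in projects %}
    <div class="tile">{{ project.name }}</div>
  {% endfor %}
</div>

<!-- after: a real ul/li list; the reset class removes default bullets/indent -->
<ul class="l-tiles-4 u-list-reset">
  {% for project in projects %}
    <li class="tile">{{ project.name }}</li>
  {% endfor %}
</ul>
```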
| [
{
"content": "from django import template\nfrom django.template.loader import render_to_string\n\nregister = template.Library()\n\n\[email protected]_tag\ndef include_template_string(template, **kwargs):\n rendered_template = render_to_string(template, kwargs)\n return str(rendered_template)\n\n\[email protected]_tag\ndef combined_url_parameter(request_query_dict, **kwargs):\n combined_query_dict = request_query_dict.copy()\n for key in kwargs:\n combined_query_dict.setlist(key, [kwargs[key]])\n encoded_parameter = '?' + combined_query_dict.urlencode()\n return encoded_parameter\n\n\[email protected]_tag\ndef filter_has_perm(perm, user, objects):\n \"\"\"Filter a list of objects based on user permissions.\"\"\"\n if not hasattr(user, 'has_perm'):\n # If the swapped user model does not support permissions, all objects\n # will be returned. This is taken from rules.templatetags.has_perm.\n return objects\n else:\n return (obj for obj in objects if user.has_perm(perm, obj))\n",
"path": "apps/contrib/templatetags/contrib_tags.py"
}
] | [
{
"content": "from django import template\nfrom django.template.loader import render_to_string\n\nregister = template.Library()\n\n\[email protected]_tag\ndef include_template_string(template, **kwargs):\n rendered_template = render_to_string(template, kwargs)\n return str(rendered_template)\n\n\[email protected]_tag\ndef combined_url_parameter(request_query_dict, **kwargs):\n combined_query_dict = request_query_dict.copy()\n for key in kwargs:\n combined_query_dict.setlist(key, [kwargs[key]])\n encoded_parameter = '?' + combined_query_dict.urlencode()\n return encoded_parameter\n\n\[email protected]_tag\ndef filter_has_perm(perm, user, objects):\n \"\"\"Filter a list of objects based on user permissions.\"\"\"\n if not hasattr(user, 'has_perm'):\n # If the swapped user model does not support permissions, all objects\n # will be returned. This is taken from rules.templatetags.has_perm.\n return objects\n else:\n return [obj for obj in objects if user.has_perm(perm, obj)]\n",
"path": "apps/contrib/templatetags/contrib_tags.py"
}
] | diff --git a/apps/budgeting/templates/meinberlin_budgeting/includes/proposal_list_item.html b/apps/budgeting/templates/meinberlin_budgeting/includes/proposal_list_item.html
index c03d68f83b..de73094c9b 100644
--- a/apps/budgeting/templates/meinberlin_budgeting/includes/proposal_list_item.html
+++ b/apps/budgeting/templates/meinberlin_budgeting/includes/proposal_list_item.html
@@ -1,6 +1,6 @@
{% load i18n item_tags moderatorfeedback_tags humanize %}
-<div class="list-item list-item--squashed">
+<li class="list-item list-item--squashed">
<div class="list-item__stats">
{% spaceless %}
<span class="rating">
@@ -47,4 +47,4 @@ <h3 class="list-item__title">
<span class="list-item__date">
{{ object.created | date }}
</span>
-</div>
+</li>
diff --git a/apps/budgeting/templates/meinberlin_budgeting/proposal_list.html b/apps/budgeting/templates/meinberlin_budgeting/proposal_list.html
index 7b8210717a..a1a4fb41f7 100644
--- a/apps/budgeting/templates/meinberlin_budgeting/proposal_list.html
+++ b/apps/budgeting/templates/meinberlin_budgeting/proposal_list.html
@@ -44,14 +44,16 @@
<div class="module-content">
<div class="l-wrapper">
<div class="l-center-8">
-
- {% for object in object_list %}
- {% include "meinberlin_budgeting/includes/proposal_list_item.html" with object=object %}
- {% endfor %}
-
- {% if object_list.count == 0 %}
+ {% if object_list.count > 0 %}
+ <ul class="u-list-reset">
+ {% for object in object_list %}
+ {% include "meinberlin_budgeting/includes/proposal_list_item.html" with object=object %}
+ {% endfor %}
+ </ul>
+ {% else %}
{% trans "Nothing to show" %}
{% endif %}
+
{% include "meinberlin_contrib/includes/pagination.html" %}
</div>
</div>
diff --git a/apps/cms/templates/meinberlin_cms/blocks/projects_block.html b/apps/cms/templates/meinberlin_cms/blocks/projects_block.html
index eab4bb0680..e2ca731b4d 100644
--- a/apps/cms/templates/meinberlin_cms/blocks/projects_block.html
+++ b/apps/cms/templates/meinberlin_cms/blocks/projects_block.html
@@ -1,11 +1,11 @@
{% load rules i18n %}
<div class="block block--projects">
<h2>{{ value.title }}</h2>
- <div class="l-tiles-4">
+ <ul class="l-tiles-4">
{% for project in value.projects %}
{% include "meinberlin_projects/includes/project_list_tile.html" with project=project %}
{% endfor %}
- </div>
+ </ul>
<div class="block__actions">
<a href="{% url 'project-list' %}" class="button">{% trans 'Show all projects' %}</a>
</div>
diff --git a/apps/contrib/templatetags/contrib_tags.py b/apps/contrib/templatetags/contrib_tags.py
index efa1053a98..d89cebd116 100644
--- a/apps/contrib/templatetags/contrib_tags.py
+++ b/apps/contrib/templatetags/contrib_tags.py
@@ -27,4 +27,4 @@ def filter_has_perm(perm, user, objects):
# will be returned. This is taken from rules.templatetags.has_perm.
return objects
else:
- return (obj for obj in objects if user.has_perm(perm, obj))
+ return [obj for obj in objects if user.has_perm(perm, obj)]
diff --git a/apps/dashboard/templates/meinberlin_dashboard/blueprint_list.html b/apps/dashboard/templates/meinberlin_dashboard/blueprint_list.html
index 29ccae261e..1490492845 100644
--- a/apps/dashboard/templates/meinberlin_dashboard/blueprint_list.html
+++ b/apps/dashboard/templates/meinberlin_dashboard/blueprint_list.html
@@ -17,10 +17,10 @@
<h1 class="menu-layout__title">{% trans "New Project" %}</h1>
-<div class="l-tiles-3">
+<ul class="l-tiles-3">
{% for blueprint_slug, blueprint in view.blueprints %}
{% include "meinberlin_dashboard/includes/blueprint_list_tile.html" with blueprint=blueprint %}
{% endfor %}
-</div>
+</ul>
{% endblock %}
diff --git a/apps/dashboard/templates/meinberlin_dashboard/includes/blueprint_list_tile.html b/apps/dashboard/templates/meinberlin_dashboard/includes/blueprint_list_tile.html
index 004f051bb3..b14968b273 100644
--- a/apps/dashboard/templates/meinberlin_dashboard/includes/blueprint_list_tile.html
+++ b/apps/dashboard/templates/meinberlin_dashboard/includes/blueprint_list_tile.html
@@ -1,6 +1,6 @@
{% load i18n static %}
-<div class="tile">
+<li class="tile">
<div class="tile__image tile__image--contained" style="background-image: url({% static blueprint.image %})"></div>
<div class="tile__body">
<h3 class="tile__title">{{ blueprint.title }}</h3>
@@ -19,4 +19,4 @@ <h3 class="tile__title">{{ blueprint.title }}</h3>
{% trans 'Select'%}
</a>
</div>
-</div>
+</li>
diff --git a/apps/dashboard/templates/meinberlin_dashboard/includes/external_project_list_item.html b/apps/dashboard/templates/meinberlin_dashboard/includes/external_project_list_item.html
index 6d5250893a..d1cf6fdd36 100644
--- a/apps/dashboard/templates/meinberlin_dashboard/includes/external_project_list_item.html
+++ b/apps/dashboard/templates/meinberlin_dashboard/includes/external_project_list_item.html
@@ -1,6 +1,6 @@
{% load i18n project_tags dashboard_tags %}
-<div class="list-item">
+<li class="list-item">
<p class="list-item__subtitle">{{ project.typ|get_blueprint_title }}</p>
<h3 class="list-item__title">
<a href="{{ project.externalproject.url }}">
@@ -29,4 +29,4 @@ <h3 class="list-item__title">
</button>
</form>
</div>
-</div>
+</li>
diff --git a/apps/dashboard/templates/meinberlin_dashboard/includes/project_list_item.html b/apps/dashboard/templates/meinberlin_dashboard/includes/project_list_item.html
index a38bd79995..47db134c1b 100644
--- a/apps/dashboard/templates/meinberlin_dashboard/includes/project_list_item.html
+++ b/apps/dashboard/templates/meinberlin_dashboard/includes/project_list_item.html
@@ -1,6 +1,6 @@
{% load i18n project_tags extproject_tags dashboard_tags %}
-<div class="list-item">
+<li class="list-item">
<p class="list-item__subtitle">{{ project.typ|get_blueprint_title }}</p>
<h3 class="list-item__title">
{{ project.name }}
@@ -59,4 +59,4 @@ <h3 class="list-item__title">
</span>
{% endif %}
</div>
-</div>
+</li>
diff --git a/apps/dashboard/templates/meinberlin_dashboard/project_list.html b/apps/dashboard/templates/meinberlin_dashboard/project_list.html
index d72c5cef80..90b58f68cb 100644
--- a/apps/dashboard/templates/meinberlin_dashboard/project_list.html
+++ b/apps/dashboard/templates/meinberlin_dashboard/project_list.html
@@ -19,11 +19,15 @@ <h1 class="lr-bar__left">
{% include "meinberlin_contrib/includes/filter_and_sort.html" with filter=view.filter %}
- {% for project in project_list %}
- {% include "meinberlin_dashboard/includes/project_list_item.html" with project=project %}
- {% empty %}
+ {% if project_list|length > 0 %}
+ <ul class="u-list-reset">
+ {% for project in project_list %}
+ {% include "meinberlin_dashboard/includes/project_list_item.html" with project=project %}
+ {% endfor %}
+ </ul>
+ {% else %}
<p>{% trans 'We could not find any projects.' %}</p>
- {% endfor %}
+ {% endif %}
{% include "meinberlin_contrib/includes/pagination.html" %}
diff --git a/apps/ideas/templates/meinberlin_ideas/idea_list.html b/apps/ideas/templates/meinberlin_ideas/idea_list.html
index 322c62c45e..c1248c9814 100644
--- a/apps/ideas/templates/meinberlin_ideas/idea_list.html
+++ b/apps/ideas/templates/meinberlin_ideas/idea_list.html
@@ -21,16 +21,16 @@
<div class="module-content">
<div class="l-wrapper">
<div class="l-center-8">
-
- <div class="list__container">
- {% for object in object_list %}
- {% include "meinberlin_ideas/includes/idea_list_item.html" with object=object %}
- {% endfor %}
- </div>
-
- {% if object_list.count == 0 %}
+ {% if object_list.count > 0 %}
+ <ul class="u-list-reset">
+ {% for object in object_list %}
+ {% include "meinberlin_ideas/includes/idea_list_item.html" with object=object %}
+ {% endfor %}
+ </ul>
+ {% else %}
{% trans "Nothing to show" %}
{% endif %}
+
{% include "meinberlin_contrib/includes/pagination.html" %}
</div>
</div>
diff --git a/apps/ideas/templates/meinberlin_ideas/includes/idea_list_item.html b/apps/ideas/templates/meinberlin_ideas/includes/idea_list_item.html
index 504d84908e..db3a60b9cc 100644
--- a/apps/ideas/templates/meinberlin_ideas/includes/idea_list_item.html
+++ b/apps/ideas/templates/meinberlin_ideas/includes/idea_list_item.html
@@ -1,6 +1,6 @@
{% load i18n module_tags item_tags %}
-<div class="list-item list-item--squashed">
+<li class="list-item list-item--squashed">
<div class="list-item__stats">
{% spaceless %}
{% if object|has_feature:"rate" %}
@@ -40,4 +40,4 @@ <h3 class="list-item__title">
<span class="list-item__date">
{{ object.created | date }}
</span>
-</div>
+</li>
diff --git a/apps/kiezkasse/templates/meinberlin_kiezkasse/includes/proposal_list_item.html b/apps/kiezkasse/templates/meinberlin_kiezkasse/includes/proposal_list_item.html
index c03d68f83b..de73094c9b 100644
--- a/apps/kiezkasse/templates/meinberlin_kiezkasse/includes/proposal_list_item.html
+++ b/apps/kiezkasse/templates/meinberlin_kiezkasse/includes/proposal_list_item.html
@@ -1,6 +1,6 @@
{% load i18n item_tags moderatorfeedback_tags humanize %}
-<div class="list-item list-item--squashed">
+<li class="list-item list-item--squashed">
<div class="list-item__stats">
{% spaceless %}
<span class="rating">
@@ -47,4 +47,4 @@ <h3 class="list-item__title">
<span class="list-item__date">
{{ object.created | date }}
</span>
-</div>
+</li>
diff --git a/apps/kiezkasse/templates/meinberlin_kiezkasse/proposal_list.html b/apps/kiezkasse/templates/meinberlin_kiezkasse/proposal_list.html
index 57bb9e8dd8..928f47b4f0 100644
--- a/apps/kiezkasse/templates/meinberlin_kiezkasse/proposal_list.html
+++ b/apps/kiezkasse/templates/meinberlin_kiezkasse/proposal_list.html
@@ -44,14 +44,16 @@
<div class="module-content">
<div class="l-wrapper">
<div class="l-center-8">
-
- {% for object in object_list %}
- {% include "meinberlin_kiezkasse/includes/proposal_list_item.html" with object=object %}
- {% endfor %}
-
- {% if object_list.count == 0 %}
+ {% if object_list.count > 0 %}
+ <ul class="u-list-reset">
+ {% for object in object_list %}
+ {% include "meinberlin_kiezkasse/includes/proposal_list_item.html" with object=object %}
+ {% endfor %}
+ </ul>
+ {% else %}
{% trans "Nothing to show" %}
{% endif %}
+
{% include "meinberlin_contrib/includes/pagination.html" %}
</div>
</div>
diff --git a/apps/mapideas/templates/meinberlin_mapideas/includes/mapidea_list_item.html b/apps/mapideas/templates/meinberlin_mapideas/includes/mapidea_list_item.html
index dfe3694fc2..37b226b7bf 100644
--- a/apps/mapideas/templates/meinberlin_mapideas/includes/mapidea_list_item.html
+++ b/apps/mapideas/templates/meinberlin_mapideas/includes/mapidea_list_item.html
@@ -1,6 +1,6 @@
{% load i18n module_tags item_tags %}
-<div class="list-item list-item--squashed">
+<li class="list-item list-item--squashed">
<div class="list-item__stats">
{% spaceless %}
{% if object|has_feature:"rate" %}
@@ -41,4 +41,4 @@ <h3 class="list-item__title">
<span class="list-item__date">
{{ object.created | date }}
</span>
-</div>
+</li>
diff --git a/apps/mapideas/templates/meinberlin_mapideas/mapidea_list.html b/apps/mapideas/templates/meinberlin_mapideas/mapidea_list.html
index ac413cdd45..6a156a51f7 100644
--- a/apps/mapideas/templates/meinberlin_mapideas/mapidea_list.html
+++ b/apps/mapideas/templates/meinberlin_mapideas/mapidea_list.html
@@ -44,16 +44,16 @@
<div class="module-content">
<div class="l-wrapper">
<div class="l-center-8">
-
- <div class="list__container">
- {% for object in object_list %}
- {% include "meinberlin_mapideas/includes/mapidea_list_item.html" with object=object %}
- {% endfor %}
- </div>
-
- {% if object_list.count == 0 %}
+ {% if object_list.count > 0 %}
+ <ul class="u-list-reset">
+ {% for object in object_list %}
+ {% include "meinberlin_mapideas/includes/mapidea_list_item.html" with object=object %}
+ {% endfor %}
+ </ul>
+ {% else %}
{% trans "Nothing to show" %}
{% endif %}
+
{% include "meinberlin_contrib/includes/pagination.html" %}
</div>
</div>
diff --git a/apps/projects/templates/meinberlin_projects/includes/project_list_tile.html b/apps/projects/templates/meinberlin_projects/includes/project_list_tile.html
index 49fcb9f536..29e40233e6 100644
--- a/apps/projects/templates/meinberlin_projects/includes/project_list_tile.html
+++ b/apps/projects/templates/meinberlin_projects/includes/project_list_tile.html
@@ -1,6 +1,6 @@
{% load i18n project_tags extproject_tags thumbnail static %}
-<div class="tile">
+<li class="tile">
<div class="tile__head">
<a tabindex="-1" class="tile__image tile__image--shadowed"
aria-labelledby="project-title-{{ project.pk }}"
@@ -42,4 +42,4 @@ <h3 class="tile__title" id="project-title-{{ project.pk }}">
</p>
{% endif %}
</div>
-</div>
+</li>
diff --git a/apps/projects/templates/meinberlin_projects/project_list.html b/apps/projects/templates/meinberlin_projects/project_list.html
index 00dd7c174b..13947daefd 100644
--- a/apps/projects/templates/meinberlin_projects/project_list.html
+++ b/apps/projects/templates/meinberlin_projects/project_list.html
@@ -7,15 +7,16 @@
<div class="l-wrapper">
{% include "meinberlin_contrib/includes/filter_and_sort.html" with filter=view.filter %}
- <div class="l-tiles-4">
- {% filter_has_perm 'a4projects.view_project' request.user project_list as filtered_projects %}
-
- {% for project in filtered_projects %}
- {% include "meinberlin_projects/includes/project_list_tile.html" with project=project %}
- {% empty %}
- <p>{% trans 'We could not find any projects.' %}</p>
- {% endfor %}
- </div>
+ {% filter_has_perm 'a4projects.view_project' request.user project_list as filtered_projects %}
+ {% if filtered_projects|length > 0 %}
+ <ul class="l-tiles-4">
+ {% for project in filtered_projects %}
+ {% include "meinberlin_projects/includes/project_list_tile.html" with project=project %}
+ {% endfor %}
+ </ul>
+ {% else %}
+ <p>{% trans 'We could not find any projects.' %}</p>
+ {% endif %}
{% include "meinberlin_contrib/includes/pagination.html" %}
</div>
diff --git a/apps/topicprio/templates/meinberlin_topicprio/includes/topic_list_item.html b/apps/topicprio/templates/meinberlin_topicprio/includes/topic_list_item.html
index 575921404b..cfbd76006a 100644
--- a/apps/topicprio/templates/meinberlin_topicprio/includes/topic_list_item.html
+++ b/apps/topicprio/templates/meinberlin_topicprio/includes/topic_list_item.html
@@ -1,6 +1,6 @@
{% load i18n module_tags item_tags %}
-<div class="list-item list-item--squashed">
+<li class="list-item list-item--squashed">
<div class="list-item__stats">
{% spaceless %}
{% if object|has_feature:"rate" %}
@@ -38,4 +38,4 @@ <h3 class="list-item__title">
<span class="list-item__date">
{{ object.created | date }}
</span>
-</div>
+</li>
diff --git a/apps/topicprio/templates/meinberlin_topicprio/includes/topic_mgmt_list_item.html b/apps/topicprio/templates/meinberlin_topicprio/includes/topic_mgmt_list_item.html
index 1b49427ee1..41513e33d1 100644
--- a/apps/topicprio/templates/meinberlin_topicprio/includes/topic_mgmt_list_item.html
+++ b/apps/topicprio/templates/meinberlin_topicprio/includes/topic_mgmt_list_item.html
@@ -1,6 +1,6 @@
{% load i18n %}
-<div class="list-item list-item--squashed">
+<li class="list-item list-item--squashed">
<div class="lr-bar">
<div class="lr-bar__left">
<h3 class="list-item__title">
@@ -17,4 +17,4 @@ <h3 class="list-item__title">
</a>
</div>
</div>
-</div>
+</li>
diff --git a/apps/topicprio/templates/meinberlin_topicprio/topic_list.html b/apps/topicprio/templates/meinberlin_topicprio/topic_list.html
index eed0364a71..0c77b245ad 100644
--- a/apps/topicprio/templates/meinberlin_topicprio/topic_list.html
+++ b/apps/topicprio/templates/meinberlin_topicprio/topic_list.html
@@ -12,16 +12,16 @@
<div class="module-content">
<div class="l-wrapper">
<div class="l-center-8">
-
- <div class="list__container">
- {% for object in object_list %}
- {% include "meinberlin_topicprio/includes/topic_list_item.html" with object=object %}
- {% endfor %}
- </div>
-
- {% if object_list.count == 0 %}
+ {% if object_list.count > 0 %}
+ <ul class="u-list-reset">
+ {% for object in object_list %}
+ {% include "meinberlin_topicprio/includes/topic_list_item.html" with object=object %}
+ {% endfor %}
+ </ul>
+ {% else %}
{% trans "Nothing to show" %}
{% endif %}
+
{% include "meinberlin_contrib/includes/pagination.html" %}
</div>
</div>
diff --git a/apps/topicprio/templates/meinberlin_topicprio/topic_mgmt_list.html b/apps/topicprio/templates/meinberlin_topicprio/topic_mgmt_list.html
index c6b4198f33..7c32110042 100644
--- a/apps/topicprio/templates/meinberlin_topicprio/topic_mgmt_list.html
+++ b/apps/topicprio/templates/meinberlin_topicprio/topic_mgmt_list.html
@@ -25,15 +25,16 @@ <h1 class="lr-bar__left">
<div class="l-center-8">
{% include "meinberlin_contrib/includes/filter_and_sort.html" with filter=view.filter %}
- <div class="list__container">
- {% for object in object_list %}
- {% include "meinberlin_topicprio/includes/topic_mgmt_list_item.html" with object=object %}
- {% endfor %}
- </div>
-
- {% if object_list.count == 0 %}
+ {% if object_list.count > 0 %}
+ <ul>
+ {% for object in object_list %}
+ {% include "meinberlin_topicprio/includes/topic_mgmt_list_item.html" with object=object %}
+ {% endfor %}
+ </ul>
+ {% else %}
{% trans "Nothing to show" %}
{% endif %}
+
{% include "meinberlin_contrib/includes/pagination.html" %}
</div>
</div>
diff --git a/meinberlin/assets/scss/_layout.scss b/meinberlin/assets/scss/_layout.scss
index 03c3fcac96..6933ff8d50 100644
--- a/meinberlin/assets/scss/_layout.scss
+++ b/meinberlin/assets/scss/_layout.scss
@@ -1,5 +1,9 @@
+// should be used with ul/ol
@mixin grid-tiles($n, $settings: ()) {
@include clearfix;
+ margin: 0;
+ padding: 0;
+ list-style: none;
> * {
@include grid-same-width($n, $settings);
diff --git a/meinberlin/assets/scss/utility.scss b/meinberlin/assets/scss/utility.scss
index 9edb056564..a670d06586 100644
--- a/meinberlin/assets/scss/utility.scss
+++ b/meinberlin/assets/scss/utility.scss
@@ -9,3 +9,9 @@
.u-spacer-bottom {
margin-bottom: $spacer;
}
+
+.u-list-reset {
+ list-style: none;
+ margin: 0;
+ padding: 0;
+}
|
frappe__frappe-21275 | New Dashboard Chart throws TypeError: format requires a mapping
## Description of the issue
I have created an elementary, stripped-down report to demonstrate the problem. It has one filter called "period" of type "Select" and has three options, as shown below:
Period (filter field)

[screenshot]

[screenshot]

[screenshot]

[screenshot]
As soon as I select the report name in the dropdown, It shows the following error:

[screenshot]
I should be able to select fields in the X-field and Y-axis as well as Filters should be populated.
### Stacktrace / full error message
### App Versions
```
{
"erpnext": "14.25.1",
"frappe": "14.36.3",
"payments": "0.0.1"
}
```
### Route
```
Form/Dashboard Chart/new-dashboard-chart-3
```
### Traceback
```
Traceback (most recent call last):
File "apps/frappe/frappe/app.py", line 66, in application
response = frappe.api.handle()
File "apps/frappe/frappe/api.py", line 54, in handle
return frappe.handler.handle()
File "apps/frappe/frappe/handler.py", line 45, in handle
data = execute_cmd(cmd)
File "apps/frappe/frappe/handler.py", line 83, in execute_cmd
return frappe.call(method, **frappe.form_dict)
File "apps/frappe/frappe/__init__.py", line 1607, in call
return fn(*args, **newargs)
File "apps/frappe/frappe/__init__.py", line 789, in wrapper_fn
retval = fn(*args, **get_newargs(fn, kwargs))
File "apps/frappe/frappe/desk/query_report.py", line 231, in run
result = generate_report_result(report, filters, user, custom_columns, is_tree, parent_field)
File "apps/frappe/frappe/__init__.py", line 789, in wrapper_fn
retval = fn(*args, **get_newargs(fn, kwargs))
File "apps/frappe/frappe/desk/query_report.py", line 90, in generate_report_result
res = get_report_result(report, filters) or []
File "apps/frappe/frappe/desk/query_report.py", line 68, in get_report_result
res = report.execute_query_report(filters)
File "apps/frappe/frappe/core/doctype/report/report.py", line 117, in execute_query_report
result = [list(t) for t in frappe.db.sql(self.query, filters)]
File "apps/frappe/frappe/database/database.py", line 219, in sql
self._cursor.execute(query, values)
File "env/lib/python3.10/site-packages/pymysql/cursors.py", line 156, in execute
query = self.mogrify(query, args)
File "env/lib/python3.10/site-packages/pymysql/cursors.py", line 134, in mogrify
query = query % self._escape_args(args, conn)
TypeError: format requires a mapping
```
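The last two frames pin down the failure: the chart screen sends `"filters": null`, the report runner coerces that to an empty list (`filters = filters or []` in `generate_report_result`) and passes it to `frappe.db.sql(self.query, filters)`, but a query containing pyformat placeholders needs a mapping on the right-hand side of `%`. A minimal sketch of the failure mode (the query text is hypothetical):
```python
# hypothetical report query with a pyformat placeholder, the style frappe
# query reports use for filter values
query = "select '%(period)s' as period"

# "filters": null from the chart screen becomes an empty list before the
# query is executed
filters = []

# pymysql escapes list args into a tuple and then evaluates `query % args`;
# a %(name)s placeholder requires a mapping, so this raises
# TypeError: format requires a mapping
query % tuple(filters)
```
The fix has `get_script` additionally return `report.filters`, so the chart form can populate the report's saved filters instead of sending null.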
### Request Data
```
{
"type": "POST",
"args": {
"report_name": "Sales Profitability",
"filters": null,
"ignore_prepared_report": 1
},
"headers": {},
"error_handlers": {},
"url": "/api/method/frappe.desk.query_report.run"
}
```
### Response Data
```
{
"exception": "TypeError: format requires a mapping"
}
```
## Additional information
Hosted on Frappe Cloud
| [
{
"content": "# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors\n# License: MIT. See LICENSE\n\nimport datetime\nimport json\nimport os\nfrom datetime import timedelta\n\nimport frappe\nimport frappe.desk.reportview\nfrom frappe import _\nfrom frappe.core.utils import ljust_list\nfrom frappe.desk.reportview import clean_params, parse_json\nfrom frappe.model.utils import render_include\nfrom frappe.modules import get_module_path, scrub\nfrom frappe.monitor import add_data_to_monitor\nfrom frappe.permissions import get_role_permissions\nfrom frappe.utils import cint, cstr, flt, format_duration, get_html_format, sbool\n\n\ndef get_report_doc(report_name):\n\tdoc = frappe.get_doc(\"Report\", report_name)\n\tdoc.custom_columns = []\n\tdoc.custom_filters = []\n\n\tif doc.report_type == \"Custom Report\":\n\t\tcustom_report_doc = doc\n\t\treference_report = custom_report_doc.reference_report\n\t\tdoc = frappe.get_doc(\"Report\", reference_report)\n\t\tdoc.custom_report = report_name\n\t\tif custom_report_doc.json:\n\t\t\tdata = json.loads(custom_report_doc.json)\n\t\t\tif data:\n\t\t\t\tdoc.custom_columns = data.get(\"columns\")\n\t\t\t\tdoc.custom_filters = data.get(\"filters\")\n\t\tdoc.is_custom_report = True\n\n\tif not doc.is_permitted():\n\t\tfrappe.throw(\n\t\t\t_(\"You don't have access to Report: {0}\").format(report_name),\n\t\t\tfrappe.PermissionError,\n\t\t)\n\n\tif not frappe.has_permission(doc.ref_doctype, \"report\"):\n\t\tfrappe.throw(\n\t\t\t_(\"You don't have permission to get a report on: {0}\").format(doc.ref_doctype),\n\t\t\tfrappe.PermissionError,\n\t\t)\n\n\tif doc.disabled:\n\t\tfrappe.throw(_(\"Report {0} is disabled\").format(report_name))\n\n\treturn doc\n\n\ndef get_report_result(report, filters):\n\tres = None\n\n\tif report.report_type == \"Query Report\":\n\t\tres = report.execute_query_report(filters)\n\n\telif report.report_type == \"Script Report\":\n\t\tres = report.execute_script_report(filters)\n\n\telif report.report_type == \"Custom Report\":\n\t\tref_report = get_report_doc(report.report_name)\n\t\tres = get_report_result(ref_report, filters)\n\n\treturn res\n\n\[email protected]_only()\ndef generate_report_result(\n\treport, filters=None, user=None, custom_columns=None, is_tree=False, parent_field=None\n):\n\tuser = user or frappe.session.user\n\tfilters = filters or []\n\n\tif filters and isinstance(filters, str):\n\t\tfilters = json.loads(filters)\n\n\tres = get_report_result(report, filters) or []\n\n\tcolumns, result, message, chart, report_summary, skip_total_row = ljust_list(res, 6)\n\tcolumns = [get_column_as_dict(col) for col in (columns or [])]\n\treport_column_names = [col[\"fieldname\"] for col in columns]\n\n\t# convert to list of dicts\n\tresult = normalize_result(result, columns)\n\n\tif report.custom_columns:\n\t\t# saved columns (with custom columns / with different column order)\n\t\tcolumns = report.custom_columns\n\n\t# unsaved custom_columns\n\tif custom_columns:\n\t\tfor custom_column in custom_columns:\n\t\t\tcolumns.insert(custom_column[\"insert_after_index\"] + 1, custom_column)\n\n\t# all columns which are not in original report\n\treport_custom_columns = [\n\t\tcolumn for column in columns if column[\"fieldname\"] not in report_column_names\n\t]\n\n\tif report_custom_columns:\n\t\tresult = add_custom_column_data(report_custom_columns, result)\n\n\tif result:\n\t\tresult = get_filtered_data(report.ref_doctype, columns, result, user)\n\n\tif cint(report.add_total_row) and result and not skip_total_row:\n\t\tresult = 
add_total_row(result, columns, is_tree=is_tree, parent_field=parent_field)\n\n\treturn {\n\t\t\"result\": result,\n\t\t\"columns\": columns,\n\t\t\"message\": message,\n\t\t\"chart\": chart,\n\t\t\"report_summary\": report_summary,\n\t\t\"skip_total_row\": skip_total_row or 0,\n\t\t\"status\": None,\n\t\t\"execution_time\": frappe.cache.hget(\"report_execution_time\", report.name) or 0,\n\t}\n\n\ndef normalize_result(result, columns):\n\t# Converts to list of dicts from list of lists/tuples\n\tdata = []\n\tcolumn_names = [column[\"fieldname\"] for column in columns]\n\tif result and isinstance(result[0], (list, tuple)):\n\t\tfor row in result:\n\t\t\trow_obj = {}\n\t\t\tfor idx, column_name in enumerate(column_names):\n\t\t\t\trow_obj[column_name] = row[idx]\n\t\t\tdata.append(row_obj)\n\telse:\n\t\tdata = result\n\n\treturn data\n\n\[email protected]()\ndef get_script(report_name):\n\treport = get_report_doc(report_name)\n\tmodule = report.module or frappe.db.get_value(\"DocType\", report.ref_doctype, \"module\")\n\n\tis_custom_module = frappe.get_cached_value(\"Module Def\", module, \"custom\")\n\n\t# custom modules are virtual modules those exists in DB but not in disk.\n\tmodule_path = \"\" if is_custom_module else get_module_path(module)\n\treport_folder = module_path and os.path.join(module_path, \"report\", scrub(report.name))\n\tscript_path = report_folder and os.path.join(report_folder, scrub(report.name) + \".js\")\n\tprint_path = report_folder and os.path.join(report_folder, scrub(report.name) + \".html\")\n\n\tscript = None\n\tif os.path.exists(script_path):\n\t\twith open(script_path) as f:\n\t\t\tscript = f.read()\n\t\t\tscript += f\"\\n\\n//# sourceURL={scrub(report.name)}.js\"\n\n\thtml_format = get_html_format(print_path)\n\n\tif not script and report.javascript:\n\t\tscript = report.javascript\n\t\tscript += f\"\\n\\n//# sourceURL={scrub(report.name)}__custom\"\n\n\tif not script:\n\t\tscript = \"frappe.query_reports['%s']={}\" % report_name\n\n\treturn {\n\t\t\"script\": render_include(script),\n\t\t\"html_format\": html_format,\n\t\t\"execution_time\": frappe.cache.hget(\"report_execution_time\", report_name) or 0,\n\t}\n\n\[email protected]()\[email protected]_only()\ndef run(\n\treport_name,\n\tfilters=None,\n\tuser=None,\n\tignore_prepared_report=False,\n\tcustom_columns=None,\n\tis_tree=False,\n\tparent_field=None,\n\tare_default_filters=True,\n):\n\treport = get_report_doc(report_name)\n\tif not user:\n\t\tuser = frappe.session.user\n\tif not frappe.has_permission(report.ref_doctype, \"report\"):\n\t\tfrappe.msgprint(\n\t\t\t_(\"Must have report permission to access this report.\"),\n\t\t\traise_exception=True,\n\t\t)\n\n\tresult = None\n\n\tif sbool(are_default_filters) and report.custom_filters:\n\t\tfilters = report.custom_filters\n\n\tif report.prepared_report and not ignore_prepared_report and not custom_columns:\n\t\tif filters:\n\t\t\tif isinstance(filters, str):\n\t\t\t\tfilters = json.loads(filters)\n\n\t\t\tdn = filters.pop(\"prepared_report_name\", None)\n\t\telse:\n\t\t\tdn = \"\"\n\t\tresult = get_prepared_report_result(report, filters, dn, user)\n\telse:\n\t\tresult = generate_report_result(report, filters, user, custom_columns, is_tree, parent_field)\n\t\tadd_data_to_monitor(report=report.reference_report or report.name)\n\n\tresult[\"add_total_row\"] = report.add_total_row and not result.get(\"skip_total_row\", False)\n\n\tif sbool(are_default_filters) and report.custom_filters:\n\t\tresult[\"custom_filters\"] = report.custom_filters\n\n\treturn 
result\n\n\ndef add_custom_column_data(custom_columns, result):\n\tcustom_column_data = get_data_for_custom_report(custom_columns)\n\n\tfor column in custom_columns:\n\t\tkey = (column.get(\"doctype\"), column.get(\"fieldname\"))\n\t\tif key in custom_column_data:\n\t\t\tfor row in result:\n\t\t\t\trow_reference = row.get(column.get(\"link_field\"))\n\t\t\t\t# possible if the row is empty\n\t\t\t\tif not row_reference:\n\t\t\t\t\tcontinue\n\t\t\t\trow[column.get(\"fieldname\")] = custom_column_data.get(key).get(row_reference)\n\n\treturn result\n\n\ndef get_prepared_report_result(report, filters, dn=\"\", user=None):\n\tfrom frappe.core.doctype.prepared_report.prepared_report import get_completed_prepared_report\n\n\tdef get_report_data(doc, data):\n\t\t# backwards compatibility - prepared report used to have a columns field,\n\t\t# we now directly fetch it from the result file\n\t\tif doc.get(\"columns\") or isinstance(data, list):\n\t\t\tcolumns = (doc.get(\"columns\") and json.loads(doc.columns)) or data[0]\n\t\t\tdata = {\"result\": data}\n\t\telse:\n\t\t\tcolumns = data.get(\"columns\")\n\n\t\tfor column in columns:\n\t\t\tif isinstance(column, dict) and column.get(\"label\"):\n\t\t\t\tcolumn[\"label\"] = _(column[\"label\"])\n\n\t\treturn data | {\"columns\": columns}\n\n\treport_data = {}\n\tif not dn:\n\t\tdn = get_completed_prepared_report(\n\t\t\tfilters, user, report.get(\"custom_report\") or report.get(\"report_name\")\n\t\t)\n\n\tdoc = frappe.get_doc(\"Prepared Report\", dn) if dn else None\n\tif doc:\n\t\ttry:\n\t\t\tif data := json.loads(doc.get_prepared_data().decode(\"utf-8\")):\n\t\t\t\treport_data = get_report_data(doc, data)\n\t\texcept Exception:\n\t\t\tdoc.log_error(\"Prepared report render failed\")\n\t\t\tfrappe.msgprint(_(\"Prepared report render failed\"))\n\t\t\tdoc = None\n\n\treturn report_data | {\"prepared_report\": True, \"doc\": doc}\n\n\[email protected]()\ndef export_query():\n\t\"\"\"export from query reports\"\"\"\n\tfrom frappe.desk.utils import get_csv_bytes, pop_csv_params, provide_binary_file\n\n\tform_params = frappe._dict(frappe.local.form_dict)\n\tcsv_params = pop_csv_params(form_params)\n\tclean_params(form_params)\n\tparse_json(form_params)\n\n\treport_name = form_params.report_name\n\tfrappe.permissions.can_export(\n\t\tfrappe.get_cached_value(\"Report\", report_name, \"ref_doctype\"),\n\t\traise_exception=True,\n\t)\n\n\tfile_format_type = form_params.file_format_type\n\tcustom_columns = frappe.parse_json(form_params.custom_columns or \"[]\")\n\tinclude_indentation = form_params.include_indentation\n\tvisible_idx = form_params.visible_idx\n\n\tif isinstance(visible_idx, str):\n\t\tvisible_idx = json.loads(visible_idx)\n\n\tdata = run(\n\t\treport_name, form_params.filters, custom_columns=custom_columns, are_default_filters=False\n\t)\n\tdata = frappe._dict(data)\n\tif not data.columns:\n\t\tfrappe.respond_as_web_page(\n\t\t\t_(\"No data to export\"),\n\t\t\t_(\"You can try changing the filters of your report.\"),\n\t\t)\n\t\treturn\n\n\tformat_duration_fields(data)\n\txlsx_data, column_widths = build_xlsx_data(data, visible_idx, include_indentation)\n\n\tif file_format_type == \"CSV\":\n\t\tcontent = get_csv_bytes(xlsx_data, csv_params)\n\t\tfile_extension = \"csv\"\n\telif file_format_type == \"Excel\":\n\t\tfrom frappe.utils.xlsxutils import make_xlsx\n\n\t\tfile_extension = \"xlsx\"\n\t\tcontent = make_xlsx(xlsx_data, \"Query Report\", column_widths=column_widths).getvalue()\n\n\tprovide_binary_file(report_name, file_extension, 
content)\n\n\ndef format_duration_fields(data: frappe._dict) -> None:\n\tfor i, col in enumerate(data.columns):\n\t\tif col.get(\"fieldtype\") != \"Duration\":\n\t\t\tcontinue\n\n\t\tfor row in data.result:\n\t\t\tindex = col.get(\"fieldname\") if isinstance(row, dict) else i\n\t\t\tif row[index]:\n\t\t\t\trow[index] = format_duration(row[index])\n\n\ndef build_xlsx_data(data, visible_idx, include_indentation, ignore_visible_idx=False):\n\tEXCEL_TYPES = (\n\t\tstr,\n\t\tbool,\n\t\ttype(None),\n\t\tint,\n\t\tfloat,\n\t\tdatetime.datetime,\n\t\tdatetime.date,\n\t\tdatetime.time,\n\t\tdatetime.timedelta,\n\t)\n\n\tresult = [[]]\n\tcolumn_widths = []\n\n\tfor column in data.columns:\n\t\tif column.get(\"hidden\"):\n\t\t\tcontinue\n\t\tresult[0].append(_(column.get(\"label\")))\n\t\tcolumn_width = cint(column.get(\"width\", 0))\n\t\t# to convert into scale accepted by openpyxl\n\t\tcolumn_width /= 10\n\t\tcolumn_widths.append(column_width)\n\n\t# build table from result\n\tfor row_idx, row in enumerate(data.result):\n\t\t# only pick up rows that are visible in the report\n\t\tif ignore_visible_idx or row_idx in visible_idx:\n\t\t\trow_data = []\n\t\t\tif isinstance(row, dict):\n\t\t\t\tfor col_idx, column in enumerate(data.columns):\n\t\t\t\t\tif column.get(\"hidden\"):\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tlabel = column.get(\"label\")\n\t\t\t\t\tfieldname = column.get(\"fieldname\")\n\t\t\t\t\tcell_value = row.get(fieldname, row.get(label, \"\"))\n\t\t\t\t\tif not isinstance(cell_value, EXCEL_TYPES):\n\t\t\t\t\t\tcell_value = cstr(cell_value)\n\n\t\t\t\t\tif cint(include_indentation) and \"indent\" in row and col_idx == 0:\n\t\t\t\t\t\tcell_value = (\" \" * cint(row[\"indent\"])) + cstr(cell_value)\n\t\t\t\t\trow_data.append(cell_value)\n\t\t\telif row:\n\t\t\t\trow_data = row\n\n\t\t\tresult.append(row_data)\n\n\treturn result, column_widths\n\n\ndef add_total_row(result, columns, meta=None, is_tree=False, parent_field=None):\n\ttotal_row = [\"\"] * len(columns)\n\thas_percent = []\n\n\tfor i, col in enumerate(columns):\n\t\tfieldtype, options, fieldname = None, None, None\n\t\tif isinstance(col, str):\n\t\t\tif meta:\n\t\t\t\t# get fieldtype from the meta\n\t\t\t\tfield = meta.get_field(col)\n\t\t\t\tif field:\n\t\t\t\t\tfieldtype = meta.get_field(col).fieldtype\n\t\t\t\t\tfieldname = meta.get_field(col).fieldname\n\t\t\telse:\n\t\t\t\tcol = col.split(\":\")\n\t\t\t\tif len(col) > 1:\n\t\t\t\t\tif col[1]:\n\t\t\t\t\t\tfieldtype = col[1]\n\t\t\t\t\t\tif \"/\" in fieldtype:\n\t\t\t\t\t\t\tfieldtype, options = fieldtype.split(\"/\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tfieldtype = \"Data\"\n\t\telse:\n\t\t\tfieldtype = col.get(\"fieldtype\")\n\t\t\tfieldname = col.get(\"fieldname\")\n\t\t\toptions = col.get(\"options\")\n\n\t\tfor row in result:\n\t\t\tif i >= len(row):\n\t\t\t\tcontinue\n\t\t\tcell = row.get(fieldname) if isinstance(row, dict) else row[i]\n\t\t\tif fieldtype in [\"Currency\", \"Int\", \"Float\", \"Percent\", \"Duration\"] and flt(cell):\n\t\t\t\tif not (is_tree and row.get(parent_field)):\n\t\t\t\t\ttotal_row[i] = flt(total_row[i]) + flt(cell)\n\n\t\t\tif fieldtype == \"Percent\" and i not in has_percent:\n\t\t\t\thas_percent.append(i)\n\n\t\t\tif fieldtype == \"Time\" and cell:\n\t\t\t\tif not total_row[i]:\n\t\t\t\t\ttotal_row[i] = timedelta(hours=0, minutes=0, seconds=0)\n\t\t\t\ttotal_row[i] = total_row[i] + cell\n\n\t\tif fieldtype == \"Link\" and options == \"Currency\":\n\t\t\ttotal_row[i] = result[0].get(fieldname) if isinstance(result[0], dict) else result[0][i]\n\n\tfor i in 
has_percent:\n\t\ttotal_row[i] = flt(total_row[i]) / len(result)\n\n\tfirst_col_fieldtype = None\n\tif isinstance(columns[0], str):\n\t\tfirst_col = columns[0].split(\":\")\n\t\tif len(first_col) > 1:\n\t\t\tfirst_col_fieldtype = first_col[1].split(\"/\", 1)[0]\n\telse:\n\t\tfirst_col_fieldtype = columns[0].get(\"fieldtype\")\n\n\tif first_col_fieldtype not in [\"Currency\", \"Int\", \"Float\", \"Percent\", \"Date\"]:\n\t\ttotal_row[0] = _(\"Total\")\n\n\tresult.append(total_row)\n\treturn result\n\n\[email protected]()\ndef get_data_for_custom_field(doctype, field):\n\n\tif not frappe.has_permission(doctype, \"read\"):\n\t\tfrappe.throw(_(\"Not Permitted to read {0}\").format(doctype), frappe.PermissionError)\n\n\tvalue_map = frappe._dict(frappe.get_all(doctype, fields=[\"name\", field], as_list=1))\n\n\treturn value_map\n\n\ndef get_data_for_custom_report(columns):\n\tdoc_field_value_map = {}\n\n\tfor column in columns:\n\t\tif column.get(\"link_field\"):\n\t\t\tfieldname = column.get(\"fieldname\")\n\t\t\tdoctype = column.get(\"doctype\")\n\t\t\tdoc_field_value_map[(doctype, fieldname)] = get_data_for_custom_field(doctype, fieldname)\n\n\treturn doc_field_value_map\n\n\[email protected]()\ndef save_report(reference_report, report_name, columns, filters):\n\treport_doc = get_report_doc(reference_report)\n\n\tdocname = frappe.db.exists(\n\t\t\"Report\",\n\t\t{\n\t\t\t\"report_name\": report_name,\n\t\t\t\"is_standard\": \"No\",\n\t\t\t\"report_type\": \"Custom Report\",\n\t\t},\n\t)\n\n\tif docname:\n\t\treport = frappe.get_doc(\"Report\", docname)\n\t\texisting_jd = json.loads(report.json)\n\t\texisting_jd[\"columns\"] = json.loads(columns)\n\t\texisting_jd[\"filters\"] = json.loads(filters)\n\t\treport.update({\"json\": json.dumps(existing_jd, separators=(\",\", \":\"))})\n\t\treport.save()\n\t\tfrappe.msgprint(_(\"Report updated successfully\"))\n\n\t\treturn docname\n\telse:\n\t\tnew_report = frappe.get_doc(\n\t\t\t{\n\t\t\t\t\"doctype\": \"Report\",\n\t\t\t\t\"report_name\": report_name,\n\t\t\t\t\"json\": f'{{\"columns\":{columns},\"filters\":{filters}}}',\n\t\t\t\t\"ref_doctype\": report_doc.ref_doctype,\n\t\t\t\t\"is_standard\": \"No\",\n\t\t\t\t\"report_type\": \"Custom Report\",\n\t\t\t\t\"reference_report\": reference_report,\n\t\t\t}\n\t\t).insert(ignore_permissions=True)\n\t\tfrappe.msgprint(_(\"{0} saved successfully\").format(new_report.name))\n\t\treturn new_report.name\n\n\ndef get_filtered_data(ref_doctype, columns, data, user):\n\tresult = []\n\tlinked_doctypes = get_linked_doctypes(columns, data)\n\tmatch_filters_per_doctype = get_user_match_filters(linked_doctypes, user=user)\n\tshared = frappe.share.get_shared(ref_doctype, user)\n\tcolumns_dict = get_columns_dict(columns)\n\n\trole_permissions = get_role_permissions(frappe.get_meta(ref_doctype), user)\n\tif_owner = role_permissions.get(\"if_owner\", {}).get(\"report\")\n\n\tif match_filters_per_doctype:\n\t\tfor row in data:\n\t\t\t# Why linked_doctypes.get(ref_doctype)? 
because if column is empty, linked_doctypes[ref_doctype] is removed\n\t\t\tif linked_doctypes.get(ref_doctype) and shared and row[linked_doctypes[ref_doctype]] in shared:\n\t\t\t\tresult.append(row)\n\n\t\t\telif has_match(\n\t\t\t\trow,\n\t\t\t\tlinked_doctypes,\n\t\t\t\tmatch_filters_per_doctype,\n\t\t\t\tref_doctype,\n\t\t\t\tif_owner,\n\t\t\t\tcolumns_dict,\n\t\t\t\tuser,\n\t\t\t):\n\t\t\t\tresult.append(row)\n\telse:\n\t\tresult = list(data)\n\n\treturn result\n\n\ndef has_match(\n\trow,\n\tlinked_doctypes,\n\tdoctype_match_filters,\n\tref_doctype,\n\tif_owner,\n\tcolumns_dict,\n\tuser,\n):\n\t\"\"\"Returns True if after evaluating permissions for each linked doctype\n\t- There is an owner match for the ref_doctype\n\t- `and` There is a user permission match for all linked doctypes\n\n\tReturns True if the row is empty\n\n\tNote:\n\tEach doctype could have multiple conflicting user permission doctypes.\n\tHence even if one of the sets allows a match, it is true.\n\tThis behavior is equivalent to the trickling of user permissions of linked doctypes to the ref doctype.\n\t\"\"\"\n\tresultant_match = True\n\n\tif not row:\n\t\t# allow empty rows :)\n\t\treturn resultant_match\n\n\tfor doctype, filter_list in doctype_match_filters.items():\n\t\tmatched_for_doctype = False\n\n\t\tif doctype == ref_doctype and if_owner:\n\t\t\tidx = linked_doctypes.get(\"User\")\n\t\t\tif idx is not None and row[idx] == user and columns_dict[idx] == columns_dict.get(\"owner\"):\n\t\t\t\t# owner match is true\n\t\t\t\tmatched_for_doctype = True\n\n\t\tif not matched_for_doctype:\n\t\t\tfor match_filters in filter_list:\n\t\t\t\tmatch = True\n\t\t\t\tfor dt, idx in linked_doctypes.items():\n\t\t\t\t\t# case handled above\n\t\t\t\t\tif dt == \"User\" and columns_dict[idx] == columns_dict.get(\"owner\"):\n\t\t\t\t\t\tcontinue\n\n\t\t\t\t\tcell_value = None\n\t\t\t\t\tif isinstance(row, dict):\n\t\t\t\t\t\tcell_value = row.get(idx)\n\t\t\t\t\telif isinstance(row, (list, tuple)):\n\t\t\t\t\t\tcell_value = row[idx]\n\n\t\t\t\t\tif (\n\t\t\t\t\t\tdt in match_filters\n\t\t\t\t\t\tand cell_value not in match_filters.get(dt)\n\t\t\t\t\t\tand frappe.db.exists(dt, cell_value)\n\t\t\t\t\t):\n\t\t\t\t\t\tmatch = False\n\t\t\t\t\t\tbreak\n\n\t\t\t\t# each doctype could have multiple conflicting user permission doctypes, hence using OR\n\t\t\t\t# so that even if one of the sets allows a match, it is true\n\t\t\t\tmatched_for_doctype = matched_for_doctype or match\n\n\t\t\t\tif matched_for_doctype:\n\t\t\t\t\tbreak\n\n\t\t# each doctype's user permissions should match the row! 
hence using AND\n\t\tresultant_match = resultant_match and matched_for_doctype\n\n\t\tif not resultant_match:\n\t\t\tbreak\n\n\treturn resultant_match\n\n\ndef get_linked_doctypes(columns, data):\n\tlinked_doctypes = {}\n\n\tcolumns_dict = get_columns_dict(columns)\n\n\tfor idx, col in enumerate(columns):\n\t\tdf = columns_dict[idx]\n\t\tif df.get(\"fieldtype\") == \"Link\":\n\t\t\tif data and isinstance(data[0], (list, tuple)):\n\t\t\t\tlinked_doctypes[df[\"options\"]] = idx\n\t\t\telse:\n\t\t\t\t# dict\n\t\t\t\tlinked_doctypes[df[\"options\"]] = df[\"fieldname\"]\n\n\t# remove doctype if column is empty\n\tcolumns_with_value = []\n\tfor row in data:\n\t\tif row:\n\t\t\tif len(row) != len(columns_with_value):\n\t\t\t\tif isinstance(row, (list, tuple)):\n\t\t\t\t\trow = enumerate(row)\n\t\t\t\telif isinstance(row, dict):\n\t\t\t\t\trow = row.items()\n\n\t\t\t\tfor col, val in row:\n\t\t\t\t\tif val and col not in columns_with_value:\n\t\t\t\t\t\tcolumns_with_value.append(col)\n\n\titems = list(linked_doctypes.items())\n\n\tfor doctype, key in items:\n\t\tif key not in columns_with_value:\n\t\t\tdel linked_doctypes[doctype]\n\n\treturn linked_doctypes\n\n\ndef get_columns_dict(columns):\n\t\"\"\"Returns a dict with column docfield values as dict\n\tThe keys for the dict are both idx and fieldname,\n\tso either index or fieldname can be used to search for a column's docfield properties\n\t\"\"\"\n\tcolumns_dict = frappe._dict()\n\tfor idx, col in enumerate(columns):\n\t\tcol_dict = get_column_as_dict(col)\n\t\tcolumns_dict[idx] = col_dict\n\t\tcolumns_dict[col_dict[\"fieldname\"]] = col_dict\n\n\treturn columns_dict\n\n\ndef get_column_as_dict(col):\n\tcol_dict = frappe._dict()\n\n\t# string\n\tif isinstance(col, str):\n\t\tcol = col.split(\":\")\n\t\tif len(col) > 1:\n\t\t\tif \"/\" in col[1]:\n\t\t\t\tcol_dict[\"fieldtype\"], col_dict[\"options\"] = col[1].split(\"/\")\n\t\t\telse:\n\t\t\t\tcol_dict[\"fieldtype\"] = col[1]\n\t\t\tif len(col) == 3:\n\t\t\t\tcol_dict[\"width\"] = col[2]\n\n\t\tcol_dict[\"label\"] = col[0]\n\t\tcol_dict[\"fieldname\"] = frappe.scrub(col[0])\n\n\t# dict\n\telse:\n\t\tcol_dict.update(col)\n\t\tif \"fieldname\" not in col_dict:\n\t\t\tcol_dict[\"fieldname\"] = frappe.scrub(col_dict[\"label\"])\n\n\treturn col_dict\n\n\ndef get_user_match_filters(doctypes, user):\n\tmatch_filters = {}\n\n\tfor dt in doctypes:\n\t\tfilter_list = frappe.desk.reportview.build_match_conditions(dt, user, False)\n\t\tif filter_list:\n\t\t\tmatch_filters[dt] = filter_list\n\n\treturn match_filters\n",
"path": "frappe/desk/query_report.py"
}
] | [
{
"content": "# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors\n# License: MIT. See LICENSE\n\nimport datetime\nimport json\nimport os\nfrom datetime import timedelta\n\nimport frappe\nimport frappe.desk.reportview\nfrom frappe import _\nfrom frappe.core.utils import ljust_list\nfrom frappe.desk.reportview import clean_params, parse_json\nfrom frappe.model.utils import render_include\nfrom frappe.modules import get_module_path, scrub\nfrom frappe.monitor import add_data_to_monitor\nfrom frappe.permissions import get_role_permissions\nfrom frappe.utils import cint, cstr, flt, format_duration, get_html_format, sbool\n\n\ndef get_report_doc(report_name):\n\tdoc = frappe.get_doc(\"Report\", report_name)\n\tdoc.custom_columns = []\n\tdoc.custom_filters = []\n\n\tif doc.report_type == \"Custom Report\":\n\t\tcustom_report_doc = doc\n\t\treference_report = custom_report_doc.reference_report\n\t\tdoc = frappe.get_doc(\"Report\", reference_report)\n\t\tdoc.custom_report = report_name\n\t\tif custom_report_doc.json:\n\t\t\tdata = json.loads(custom_report_doc.json)\n\t\t\tif data:\n\t\t\t\tdoc.custom_columns = data.get(\"columns\")\n\t\t\t\tdoc.custom_filters = data.get(\"filters\")\n\t\tdoc.is_custom_report = True\n\n\tif not doc.is_permitted():\n\t\tfrappe.throw(\n\t\t\t_(\"You don't have access to Report: {0}\").format(report_name),\n\t\t\tfrappe.PermissionError,\n\t\t)\n\n\tif not frappe.has_permission(doc.ref_doctype, \"report\"):\n\t\tfrappe.throw(\n\t\t\t_(\"You don't have permission to get a report on: {0}\").format(doc.ref_doctype),\n\t\t\tfrappe.PermissionError,\n\t\t)\n\n\tif doc.disabled:\n\t\tfrappe.throw(_(\"Report {0} is disabled\").format(report_name))\n\n\treturn doc\n\n\ndef get_report_result(report, filters):\n\tres = None\n\n\tif report.report_type == \"Query Report\":\n\t\tres = report.execute_query_report(filters)\n\n\telif report.report_type == \"Script Report\":\n\t\tres = report.execute_script_report(filters)\n\n\telif report.report_type == \"Custom Report\":\n\t\tref_report = get_report_doc(report.report_name)\n\t\tres = get_report_result(ref_report, filters)\n\n\treturn res\n\n\[email protected]_only()\ndef generate_report_result(\n\treport, filters=None, user=None, custom_columns=None, is_tree=False, parent_field=None\n):\n\tuser = user or frappe.session.user\n\tfilters = filters or []\n\n\tif filters and isinstance(filters, str):\n\t\tfilters = json.loads(filters)\n\n\tres = get_report_result(report, filters) or []\n\n\tcolumns, result, message, chart, report_summary, skip_total_row = ljust_list(res, 6)\n\tcolumns = [get_column_as_dict(col) for col in (columns or [])]\n\treport_column_names = [col[\"fieldname\"] for col in columns]\n\n\t# convert to list of dicts\n\tresult = normalize_result(result, columns)\n\n\tif report.custom_columns:\n\t\t# saved columns (with custom columns / with different column order)\n\t\tcolumns = report.custom_columns\n\n\t# unsaved custom_columns\n\tif custom_columns:\n\t\tfor custom_column in custom_columns:\n\t\t\tcolumns.insert(custom_column[\"insert_after_index\"] + 1, custom_column)\n\n\t# all columns which are not in original report\n\treport_custom_columns = [\n\t\tcolumn for column in columns if column[\"fieldname\"] not in report_column_names\n\t]\n\n\tif report_custom_columns:\n\t\tresult = add_custom_column_data(report_custom_columns, result)\n\n\tif result:\n\t\tresult = get_filtered_data(report.ref_doctype, columns, result, user)\n\n\tif cint(report.add_total_row) and result and not skip_total_row:\n\t\tresult = 
add_total_row(result, columns, is_tree=is_tree, parent_field=parent_field)\n\n\treturn {\n\t\t\"result\": result,\n\t\t\"columns\": columns,\n\t\t\"message\": message,\n\t\t\"chart\": chart,\n\t\t\"report_summary\": report_summary,\n\t\t\"skip_total_row\": skip_total_row or 0,\n\t\t\"status\": None,\n\t\t\"execution_time\": frappe.cache.hget(\"report_execution_time\", report.name) or 0,\n\t}\n\n\ndef normalize_result(result, columns):\n\t# Converts to list of dicts from list of lists/tuples\n\tdata = []\n\tcolumn_names = [column[\"fieldname\"] for column in columns]\n\tif result and isinstance(result[0], (list, tuple)):\n\t\tfor row in result:\n\t\t\trow_obj = {}\n\t\t\tfor idx, column_name in enumerate(column_names):\n\t\t\t\trow_obj[column_name] = row[idx]\n\t\t\tdata.append(row_obj)\n\telse:\n\t\tdata = result\n\n\treturn data\n\n\[email protected]()\ndef get_script(report_name):\n\treport = get_report_doc(report_name)\n\tmodule = report.module or frappe.db.get_value(\"DocType\", report.ref_doctype, \"module\")\n\n\tis_custom_module = frappe.get_cached_value(\"Module Def\", module, \"custom\")\n\n\t# custom modules are virtual modules those exists in DB but not in disk.\n\tmodule_path = \"\" if is_custom_module else get_module_path(module)\n\treport_folder = module_path and os.path.join(module_path, \"report\", scrub(report.name))\n\tscript_path = report_folder and os.path.join(report_folder, scrub(report.name) + \".js\")\n\tprint_path = report_folder and os.path.join(report_folder, scrub(report.name) + \".html\")\n\n\tscript = None\n\tif os.path.exists(script_path):\n\t\twith open(script_path) as f:\n\t\t\tscript = f.read()\n\t\t\tscript += f\"\\n\\n//# sourceURL={scrub(report.name)}.js\"\n\n\thtml_format = get_html_format(print_path)\n\n\tif not script and report.javascript:\n\t\tscript = report.javascript\n\t\tscript += f\"\\n\\n//# sourceURL={scrub(report.name)}__custom\"\n\n\tif not script:\n\t\tscript = \"frappe.query_reports['%s']={}\" % report_name\n\n\treturn {\n\t\t\"script\": render_include(script),\n\t\t\"html_format\": html_format,\n\t\t\"execution_time\": frappe.cache.hget(\"report_execution_time\", report_name) or 0,\n\t\t\"filters\": report.filters,\n\t}\n\n\[email protected]()\[email protected]_only()\ndef run(\n\treport_name,\n\tfilters=None,\n\tuser=None,\n\tignore_prepared_report=False,\n\tcustom_columns=None,\n\tis_tree=False,\n\tparent_field=None,\n\tare_default_filters=True,\n):\n\treport = get_report_doc(report_name)\n\tif not user:\n\t\tuser = frappe.session.user\n\tif not frappe.has_permission(report.ref_doctype, \"report\"):\n\t\tfrappe.msgprint(\n\t\t\t_(\"Must have report permission to access this report.\"),\n\t\t\traise_exception=True,\n\t\t)\n\n\tresult = None\n\n\tif sbool(are_default_filters) and report.custom_filters:\n\t\tfilters = report.custom_filters\n\n\tif report.prepared_report and not ignore_prepared_report and not custom_columns:\n\t\tif filters:\n\t\t\tif isinstance(filters, str):\n\t\t\t\tfilters = json.loads(filters)\n\n\t\t\tdn = filters.pop(\"prepared_report_name\", None)\n\t\telse:\n\t\t\tdn = \"\"\n\t\tresult = get_prepared_report_result(report, filters, dn, user)\n\telse:\n\t\tresult = generate_report_result(report, filters, user, custom_columns, is_tree, parent_field)\n\t\tadd_data_to_monitor(report=report.reference_report or report.name)\n\n\tresult[\"add_total_row\"] = report.add_total_row and not result.get(\"skip_total_row\", False)\n\n\tif sbool(are_default_filters) and report.custom_filters:\n\t\tresult[\"custom_filters\"] = 
report.custom_filters\n\n\treturn result\n\n\ndef add_custom_column_data(custom_columns, result):\n\tcustom_column_data = get_data_for_custom_report(custom_columns)\n\n\tfor column in custom_columns:\n\t\tkey = (column.get(\"doctype\"), column.get(\"fieldname\"))\n\t\tif key in custom_column_data:\n\t\t\tfor row in result:\n\t\t\t\trow_reference = row.get(column.get(\"link_field\"))\n\t\t\t\t# possible if the row is empty\n\t\t\t\tif not row_reference:\n\t\t\t\t\tcontinue\n\t\t\t\trow[column.get(\"fieldname\")] = custom_column_data.get(key).get(row_reference)\n\n\treturn result\n\n\ndef get_prepared_report_result(report, filters, dn=\"\", user=None):\n\tfrom frappe.core.doctype.prepared_report.prepared_report import get_completed_prepared_report\n\n\tdef get_report_data(doc, data):\n\t\t# backwards compatibility - prepared report used to have a columns field,\n\t\t# we now directly fetch it from the result file\n\t\tif doc.get(\"columns\") or isinstance(data, list):\n\t\t\tcolumns = (doc.get(\"columns\") and json.loads(doc.columns)) or data[0]\n\t\t\tdata = {\"result\": data}\n\t\telse:\n\t\t\tcolumns = data.get(\"columns\")\n\n\t\tfor column in columns:\n\t\t\tif isinstance(column, dict) and column.get(\"label\"):\n\t\t\t\tcolumn[\"label\"] = _(column[\"label\"])\n\n\t\treturn data | {\"columns\": columns}\n\n\treport_data = {}\n\tif not dn:\n\t\tdn = get_completed_prepared_report(\n\t\t\tfilters, user, report.get(\"custom_report\") or report.get(\"report_name\")\n\t\t)\n\n\tdoc = frappe.get_doc(\"Prepared Report\", dn) if dn else None\n\tif doc:\n\t\ttry:\n\t\t\tif data := json.loads(doc.get_prepared_data().decode(\"utf-8\")):\n\t\t\t\treport_data = get_report_data(doc, data)\n\t\texcept Exception:\n\t\t\tdoc.log_error(\"Prepared report render failed\")\n\t\t\tfrappe.msgprint(_(\"Prepared report render failed\"))\n\t\t\tdoc = None\n\n\treturn report_data | {\"prepared_report\": True, \"doc\": doc}\n\n\[email protected]()\ndef export_query():\n\t\"\"\"export from query reports\"\"\"\n\tfrom frappe.desk.utils import get_csv_bytes, pop_csv_params, provide_binary_file\n\n\tform_params = frappe._dict(frappe.local.form_dict)\n\tcsv_params = pop_csv_params(form_params)\n\tclean_params(form_params)\n\tparse_json(form_params)\n\n\treport_name = form_params.report_name\n\tfrappe.permissions.can_export(\n\t\tfrappe.get_cached_value(\"Report\", report_name, \"ref_doctype\"),\n\t\traise_exception=True,\n\t)\n\n\tfile_format_type = form_params.file_format_type\n\tcustom_columns = frappe.parse_json(form_params.custom_columns or \"[]\")\n\tinclude_indentation = form_params.include_indentation\n\tvisible_idx = form_params.visible_idx\n\n\tif isinstance(visible_idx, str):\n\t\tvisible_idx = json.loads(visible_idx)\n\n\tdata = run(\n\t\treport_name, form_params.filters, custom_columns=custom_columns, are_default_filters=False\n\t)\n\tdata = frappe._dict(data)\n\tif not data.columns:\n\t\tfrappe.respond_as_web_page(\n\t\t\t_(\"No data to export\"),\n\t\t\t_(\"You can try changing the filters of your report.\"),\n\t\t)\n\t\treturn\n\n\tformat_duration_fields(data)\n\txlsx_data, column_widths = build_xlsx_data(data, visible_idx, include_indentation)\n\n\tif file_format_type == \"CSV\":\n\t\tcontent = get_csv_bytes(xlsx_data, csv_params)\n\t\tfile_extension = \"csv\"\n\telif file_format_type == \"Excel\":\n\t\tfrom frappe.utils.xlsxutils import make_xlsx\n\n\t\tfile_extension = \"xlsx\"\n\t\tcontent = make_xlsx(xlsx_data, \"Query Report\", 
column_widths=column_widths).getvalue()\n\n\tprovide_binary_file(report_name, file_extension, content)\n\n\ndef format_duration_fields(data: frappe._dict) -> None:\n\tfor i, col in enumerate(data.columns):\n\t\tif col.get(\"fieldtype\") != \"Duration\":\n\t\t\tcontinue\n\n\t\tfor row in data.result:\n\t\t\tindex = col.get(\"fieldname\") if isinstance(row, dict) else i\n\t\t\tif row[index]:\n\t\t\t\trow[index] = format_duration(row[index])\n\n\ndef build_xlsx_data(data, visible_idx, include_indentation, ignore_visible_idx=False):\n\tEXCEL_TYPES = (\n\t\tstr,\n\t\tbool,\n\t\ttype(None),\n\t\tint,\n\t\tfloat,\n\t\tdatetime.datetime,\n\t\tdatetime.date,\n\t\tdatetime.time,\n\t\tdatetime.timedelta,\n\t)\n\n\tresult = [[]]\n\tcolumn_widths = []\n\n\tfor column in data.columns:\n\t\tif column.get(\"hidden\"):\n\t\t\tcontinue\n\t\tresult[0].append(_(column.get(\"label\")))\n\t\tcolumn_width = cint(column.get(\"width\", 0))\n\t\t# to convert into scale accepted by openpyxl\n\t\tcolumn_width /= 10\n\t\tcolumn_widths.append(column_width)\n\n\t# build table from result\n\tfor row_idx, row in enumerate(data.result):\n\t\t# only pick up rows that are visible in the report\n\t\tif ignore_visible_idx or row_idx in visible_idx:\n\t\t\trow_data = []\n\t\t\tif isinstance(row, dict):\n\t\t\t\tfor col_idx, column in enumerate(data.columns):\n\t\t\t\t\tif column.get(\"hidden\"):\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tlabel = column.get(\"label\")\n\t\t\t\t\tfieldname = column.get(\"fieldname\")\n\t\t\t\t\tcell_value = row.get(fieldname, row.get(label, \"\"))\n\t\t\t\t\tif not isinstance(cell_value, EXCEL_TYPES):\n\t\t\t\t\t\tcell_value = cstr(cell_value)\n\n\t\t\t\t\tif cint(include_indentation) and \"indent\" in row and col_idx == 0:\n\t\t\t\t\t\tcell_value = (\" \" * cint(row[\"indent\"])) + cstr(cell_value)\n\t\t\t\t\trow_data.append(cell_value)\n\t\t\telif row:\n\t\t\t\trow_data = row\n\n\t\t\tresult.append(row_data)\n\n\treturn result, column_widths\n\n\ndef add_total_row(result, columns, meta=None, is_tree=False, parent_field=None):\n\ttotal_row = [\"\"] * len(columns)\n\thas_percent = []\n\n\tfor i, col in enumerate(columns):\n\t\tfieldtype, options, fieldname = None, None, None\n\t\tif isinstance(col, str):\n\t\t\tif meta:\n\t\t\t\t# get fieldtype from the meta\n\t\t\t\tfield = meta.get_field(col)\n\t\t\t\tif field:\n\t\t\t\t\tfieldtype = meta.get_field(col).fieldtype\n\t\t\t\t\tfieldname = meta.get_field(col).fieldname\n\t\t\telse:\n\t\t\t\tcol = col.split(\":\")\n\t\t\t\tif len(col) > 1:\n\t\t\t\t\tif col[1]:\n\t\t\t\t\t\tfieldtype = col[1]\n\t\t\t\t\t\tif \"/\" in fieldtype:\n\t\t\t\t\t\t\tfieldtype, options = fieldtype.split(\"/\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tfieldtype = \"Data\"\n\t\telse:\n\t\t\tfieldtype = col.get(\"fieldtype\")\n\t\t\tfieldname = col.get(\"fieldname\")\n\t\t\toptions = col.get(\"options\")\n\n\t\tfor row in result:\n\t\t\tif i >= len(row):\n\t\t\t\tcontinue\n\t\t\tcell = row.get(fieldname) if isinstance(row, dict) else row[i]\n\t\t\tif fieldtype in [\"Currency\", \"Int\", \"Float\", \"Percent\", \"Duration\"] and flt(cell):\n\t\t\t\tif not (is_tree and row.get(parent_field)):\n\t\t\t\t\ttotal_row[i] = flt(total_row[i]) + flt(cell)\n\n\t\t\tif fieldtype == \"Percent\" and i not in has_percent:\n\t\t\t\thas_percent.append(i)\n\n\t\t\tif fieldtype == \"Time\" and cell:\n\t\t\t\tif not total_row[i]:\n\t\t\t\t\ttotal_row[i] = timedelta(hours=0, minutes=0, seconds=0)\n\t\t\t\ttotal_row[i] = total_row[i] + cell\n\n\t\tif fieldtype == \"Link\" and options == 
\"Currency\":\n\t\t\ttotal_row[i] = result[0].get(fieldname) if isinstance(result[0], dict) else result[0][i]\n\n\tfor i in has_percent:\n\t\ttotal_row[i] = flt(total_row[i]) / len(result)\n\n\tfirst_col_fieldtype = None\n\tif isinstance(columns[0], str):\n\t\tfirst_col = columns[0].split(\":\")\n\t\tif len(first_col) > 1:\n\t\t\tfirst_col_fieldtype = first_col[1].split(\"/\", 1)[0]\n\telse:\n\t\tfirst_col_fieldtype = columns[0].get(\"fieldtype\")\n\n\tif first_col_fieldtype not in [\"Currency\", \"Int\", \"Float\", \"Percent\", \"Date\"]:\n\t\ttotal_row[0] = _(\"Total\")\n\n\tresult.append(total_row)\n\treturn result\n\n\[email protected]()\ndef get_data_for_custom_field(doctype, field):\n\n\tif not frappe.has_permission(doctype, \"read\"):\n\t\tfrappe.throw(_(\"Not Permitted to read {0}\").format(doctype), frappe.PermissionError)\n\n\tvalue_map = frappe._dict(frappe.get_all(doctype, fields=[\"name\", field], as_list=1))\n\n\treturn value_map\n\n\ndef get_data_for_custom_report(columns):\n\tdoc_field_value_map = {}\n\n\tfor column in columns:\n\t\tif column.get(\"link_field\"):\n\t\t\tfieldname = column.get(\"fieldname\")\n\t\t\tdoctype = column.get(\"doctype\")\n\t\t\tdoc_field_value_map[(doctype, fieldname)] = get_data_for_custom_field(doctype, fieldname)\n\n\treturn doc_field_value_map\n\n\[email protected]()\ndef save_report(reference_report, report_name, columns, filters):\n\treport_doc = get_report_doc(reference_report)\n\n\tdocname = frappe.db.exists(\n\t\t\"Report\",\n\t\t{\n\t\t\t\"report_name\": report_name,\n\t\t\t\"is_standard\": \"No\",\n\t\t\t\"report_type\": \"Custom Report\",\n\t\t},\n\t)\n\n\tif docname:\n\t\treport = frappe.get_doc(\"Report\", docname)\n\t\texisting_jd = json.loads(report.json)\n\t\texisting_jd[\"columns\"] = json.loads(columns)\n\t\texisting_jd[\"filters\"] = json.loads(filters)\n\t\treport.update({\"json\": json.dumps(existing_jd, separators=(\",\", \":\"))})\n\t\treport.save()\n\t\tfrappe.msgprint(_(\"Report updated successfully\"))\n\n\t\treturn docname\n\telse:\n\t\tnew_report = frappe.get_doc(\n\t\t\t{\n\t\t\t\t\"doctype\": \"Report\",\n\t\t\t\t\"report_name\": report_name,\n\t\t\t\t\"json\": f'{{\"columns\":{columns},\"filters\":{filters}}}',\n\t\t\t\t\"ref_doctype\": report_doc.ref_doctype,\n\t\t\t\t\"is_standard\": \"No\",\n\t\t\t\t\"report_type\": \"Custom Report\",\n\t\t\t\t\"reference_report\": reference_report,\n\t\t\t}\n\t\t).insert(ignore_permissions=True)\n\t\tfrappe.msgprint(_(\"{0} saved successfully\").format(new_report.name))\n\t\treturn new_report.name\n\n\ndef get_filtered_data(ref_doctype, columns, data, user):\n\tresult = []\n\tlinked_doctypes = get_linked_doctypes(columns, data)\n\tmatch_filters_per_doctype = get_user_match_filters(linked_doctypes, user=user)\n\tshared = frappe.share.get_shared(ref_doctype, user)\n\tcolumns_dict = get_columns_dict(columns)\n\n\trole_permissions = get_role_permissions(frappe.get_meta(ref_doctype), user)\n\tif_owner = role_permissions.get(\"if_owner\", {}).get(\"report\")\n\n\tif match_filters_per_doctype:\n\t\tfor row in data:\n\t\t\t# Why linked_doctypes.get(ref_doctype)? 
because if column is empty, linked_doctypes[ref_doctype] is removed\n\t\t\tif linked_doctypes.get(ref_doctype) and shared and row[linked_doctypes[ref_doctype]] in shared:\n\t\t\t\tresult.append(row)\n\n\t\t\telif has_match(\n\t\t\t\trow,\n\t\t\t\tlinked_doctypes,\n\t\t\t\tmatch_filters_per_doctype,\n\t\t\t\tref_doctype,\n\t\t\t\tif_owner,\n\t\t\t\tcolumns_dict,\n\t\t\t\tuser,\n\t\t\t):\n\t\t\t\tresult.append(row)\n\telse:\n\t\tresult = list(data)\n\n\treturn result\n\n\ndef has_match(\n\trow,\n\tlinked_doctypes,\n\tdoctype_match_filters,\n\tref_doctype,\n\tif_owner,\n\tcolumns_dict,\n\tuser,\n):\n\t\"\"\"Returns True if after evaluating permissions for each linked doctype\n\t- There is an owner match for the ref_doctype\n\t- `and` There is a user permission match for all linked doctypes\n\n\tReturns True if the row is empty\n\n\tNote:\n\tEach doctype could have multiple conflicting user permission doctypes.\n\tHence even if one of the sets allows a match, it is true.\n\tThis behavior is equivalent to the trickling of user permissions of linked doctypes to the ref doctype.\n\t\"\"\"\n\tresultant_match = True\n\n\tif not row:\n\t\t# allow empty rows :)\n\t\treturn resultant_match\n\n\tfor doctype, filter_list in doctype_match_filters.items():\n\t\tmatched_for_doctype = False\n\n\t\tif doctype == ref_doctype and if_owner:\n\t\t\tidx = linked_doctypes.get(\"User\")\n\t\t\tif idx is not None and row[idx] == user and columns_dict[idx] == columns_dict.get(\"owner\"):\n\t\t\t\t# owner match is true\n\t\t\t\tmatched_for_doctype = True\n\n\t\tif not matched_for_doctype:\n\t\t\tfor match_filters in filter_list:\n\t\t\t\tmatch = True\n\t\t\t\tfor dt, idx in linked_doctypes.items():\n\t\t\t\t\t# case handled above\n\t\t\t\t\tif dt == \"User\" and columns_dict[idx] == columns_dict.get(\"owner\"):\n\t\t\t\t\t\tcontinue\n\n\t\t\t\t\tcell_value = None\n\t\t\t\t\tif isinstance(row, dict):\n\t\t\t\t\t\tcell_value = row.get(idx)\n\t\t\t\t\telif isinstance(row, (list, tuple)):\n\t\t\t\t\t\tcell_value = row[idx]\n\n\t\t\t\t\tif (\n\t\t\t\t\t\tdt in match_filters\n\t\t\t\t\t\tand cell_value not in match_filters.get(dt)\n\t\t\t\t\t\tand frappe.db.exists(dt, cell_value)\n\t\t\t\t\t):\n\t\t\t\t\t\tmatch = False\n\t\t\t\t\t\tbreak\n\n\t\t\t\t# each doctype could have multiple conflicting user permission doctypes, hence using OR\n\t\t\t\t# so that even if one of the sets allows a match, it is true\n\t\t\t\tmatched_for_doctype = matched_for_doctype or match\n\n\t\t\t\tif matched_for_doctype:\n\t\t\t\t\tbreak\n\n\t\t# each doctype's user permissions should match the row! 
hence using AND\n\t\tresultant_match = resultant_match and matched_for_doctype\n\n\t\tif not resultant_match:\n\t\t\tbreak\n\n\treturn resultant_match\n\n\ndef get_linked_doctypes(columns, data):\n\tlinked_doctypes = {}\n\n\tcolumns_dict = get_columns_dict(columns)\n\n\tfor idx, col in enumerate(columns):\n\t\tdf = columns_dict[idx]\n\t\tif df.get(\"fieldtype\") == \"Link\":\n\t\t\tif data and isinstance(data[0], (list, tuple)):\n\t\t\t\tlinked_doctypes[df[\"options\"]] = idx\n\t\t\telse:\n\t\t\t\t# dict\n\t\t\t\tlinked_doctypes[df[\"options\"]] = df[\"fieldname\"]\n\n\t# remove doctype if column is empty\n\tcolumns_with_value = []\n\tfor row in data:\n\t\tif row:\n\t\t\tif len(row) != len(columns_with_value):\n\t\t\t\tif isinstance(row, (list, tuple)):\n\t\t\t\t\trow = enumerate(row)\n\t\t\t\telif isinstance(row, dict):\n\t\t\t\t\trow = row.items()\n\n\t\t\t\tfor col, val in row:\n\t\t\t\t\tif val and col not in columns_with_value:\n\t\t\t\t\t\tcolumns_with_value.append(col)\n\n\titems = list(linked_doctypes.items())\n\n\tfor doctype, key in items:\n\t\tif key not in columns_with_value:\n\t\t\tdel linked_doctypes[doctype]\n\n\treturn linked_doctypes\n\n\ndef get_columns_dict(columns):\n\t\"\"\"Returns a dict with column docfield values as dict\n\tThe keys for the dict are both idx and fieldname,\n\tso either index or fieldname can be used to search for a column's docfield properties\n\t\"\"\"\n\tcolumns_dict = frappe._dict()\n\tfor idx, col in enumerate(columns):\n\t\tcol_dict = get_column_as_dict(col)\n\t\tcolumns_dict[idx] = col_dict\n\t\tcolumns_dict[col_dict[\"fieldname\"]] = col_dict\n\n\treturn columns_dict\n\n\ndef get_column_as_dict(col):\n\tcol_dict = frappe._dict()\n\n\t# string\n\tif isinstance(col, str):\n\t\tcol = col.split(\":\")\n\t\tif len(col) > 1:\n\t\t\tif \"/\" in col[1]:\n\t\t\t\tcol_dict[\"fieldtype\"], col_dict[\"options\"] = col[1].split(\"/\")\n\t\t\telse:\n\t\t\t\tcol_dict[\"fieldtype\"] = col[1]\n\t\t\tif len(col) == 3:\n\t\t\t\tcol_dict[\"width\"] = col[2]\n\n\t\tcol_dict[\"label\"] = col[0]\n\t\tcol_dict[\"fieldname\"] = frappe.scrub(col[0])\n\n\t# dict\n\telse:\n\t\tcol_dict.update(col)\n\t\tif \"fieldname\" not in col_dict:\n\t\t\tcol_dict[\"fieldname\"] = frappe.scrub(col_dict[\"label\"])\n\n\treturn col_dict\n\n\ndef get_user_match_filters(doctypes, user):\n\tmatch_filters = {}\n\n\tfor dt in doctypes:\n\t\tfilter_list = frappe.desk.reportview.build_match_conditions(dt, user, False)\n\t\tif filter_list:\n\t\t\tmatch_filters[dt] = filter_list\n\n\treturn match_filters\n",
"path": "frappe/desk/query_report.py"
}
] | diff --git a/frappe/desk/query_report.py b/frappe/desk/query_report.py
index 6f4bc716aab2..5b7c450ae966 100644
--- a/frappe/desk/query_report.py
+++ b/frappe/desk/query_report.py
@@ -171,6 +171,7 @@ def get_script(report_name):
"script": render_include(script),
"html_format": html_format,
"execution_time": frappe.cache.hget("report_execution_time", report_name) or 0,
+ "filters": report.filters,
}
diff --git a/frappe/public/js/frappe/views/reports/report_utils.js b/frappe/public/js/frappe/views/reports/report_utils.js
index 9713f8bb9950..d75716541b0a 100644
--- a/frappe/public/js/frappe/views/reports/report_utils.js
+++ b/frappe/public/js/frappe/views/reports/report_utils.js
@@ -126,6 +126,13 @@ frappe.report_utils = {
.then((r) => {
frappe.dom.eval(r.script || "");
return frappe.after_ajax(() => {
+ if (
+ frappe.query_reports[report_name] &&
+ !frappe.query_reports[report_name].filter &&
+ r.filters
+ ) {
+ return (frappe.query_reports[report_name].filters = r.filters);
+ }
return (
frappe.query_reports[report_name] &&
frappe.query_reports[report_name].filters
|
e-valuation__EvaP-772 | Create semester form
The form for creating a semester should not have an `is archived` checkbox.
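One possible minimal sketch of the fix (field and mixin names taken from the existing `evap/staff/forms.py`; whether any further fields belong on the form is left open): enumerate the editable name fields explicitly instead of using `fields = "__all__"`, so the archived flag is never rendered.

```python
from django import forms

from evap.evaluation.forms import BootstrapMixin
from evap.evaluation.models import Semester


class SemesterForm(forms.ModelForm, BootstrapMixin):
    class Meta:
        model = Semester
        # List the editable fields explicitly instead of "__all__" so the
        # "is archived" checkbox never shows up when creating a semester.
        fields = ("name_de", "name_en")
```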
| [
{
"content": "from django import forms\nfrom django.db.models import Q\nfrom django.core.exceptions import SuspiciousOperation\nfrom django.forms.models import BaseInlineFormSet\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.utils.text import normalize_newlines\nfrom django.core.exceptions import ValidationError\nfrom django.contrib.auth.models import Group\n\nfrom evap.evaluation.forms import BootstrapMixin, QuestionnaireMultipleChoiceField\nfrom evap.evaluation.models import Contribution, Course, Question, Questionnaire, Semester, UserProfile, FaqSection, \\\n FaqQuestion, EmailTemplate, TextAnswer, Degree, RatingAnswerCounter, CourseType\nfrom evap.staff.fields import ToolTipModelMultipleChoiceField\n\nimport logging\n\nlogger = logging.getLogger(__name__)\n\n\ndef disable_all_fields(form):\n for field in form.fields.values():\n field.disabled = True\n\n\nclass ImportForm(forms.Form, BootstrapMixin):\n vote_start_date = forms.DateField(label=_(\"First day of evaluation\"), localize=True)\n vote_end_date = forms.DateField(label=_(\"Last day of evaluation\"), localize=True)\n\n excel_file = forms.FileField(label=_(\"Excel file\"))\n\n\nclass UserImportForm(forms.Form, BootstrapMixin):\n excel_file = forms.FileField(label=_(\"Excel file\"))\n\n\nclass UserBulkDeleteForm(forms.Form, BootstrapMixin):\n username_file = forms.FileField(label=_(\"Username file\"))\n\n\nclass SemesterForm(forms.ModelForm, BootstrapMixin):\n class Meta:\n model = Semester\n fields = \"__all__\"\n\n\nclass DegreeForm(forms.ModelForm, BootstrapMixin):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n self.fields[\"name_de\"].widget = forms.TextInput(attrs={'class': 'form-control'})\n self.fields[\"name_en\"].widget = forms.TextInput(attrs={'class': 'form-control'})\n self.fields[\"order\"].widget = forms.HiddenInput()\n\n class Meta:\n model = Degree\n fields = \"__all__\"\n\n def clean(self):\n super().clean()\n if self.cleaned_data.get('DELETE') and not self.instance.can_staff_delete:\n raise SuspiciousOperation(\"Deleting degree not allowed\")\n\n\nclass CourseTypeForm(forms.ModelForm, BootstrapMixin):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n self.fields[\"name_de\"].widget = forms.TextInput(attrs={'class': 'form-control'})\n self.fields[\"name_en\"].widget = forms.TextInput(attrs={'class': 'form-control'})\n\n class Meta:\n model = CourseType\n fields = \"__all__\"\n\n def clean(self):\n super().clean()\n if self.cleaned_data.get('DELETE') and not self.instance.can_staff_delete:\n raise SuspiciousOperation(\"Deleting course type not allowed\")\n\n\nclass CourseTypeMergeSelectionForm(forms.Form, BootstrapMixin):\n main_type = forms.ModelChoiceField(CourseType.objects.all())\n other_type = forms.ModelChoiceField(CourseType.objects.all())\n\n def clean(self):\n super().clean()\n if self.cleaned_data.get('main_type') == self.cleaned_data.get('other_type'):\n raise ValidationError(_(\"You must select two different course types.\"))\n\n\nclass CourseForm(forms.ModelForm, BootstrapMixin):\n general_questions = QuestionnaireMultipleChoiceField(Questionnaire.objects.filter(is_for_contributors=False, obsolete=False), label=_(\"General questions\"))\n semester = forms.ModelChoiceField(Semester.objects.all(), disabled=True, required=False, widget=forms.HiddenInput())\n\n # the following field is needed, because the auto_now=True for last_modified_time makes the corresponding field\n # uneditable and so it can't be displayed in the model 
form\n # see https://docs.djangoproject.com/en/dev/ref/models/fields/#datefield for details\n last_modified_time_2 = forms.DateTimeField(label=_(\"Last modified\"), required=False, localize=True, disabled=True)\n # last_modified_user would usually get a select widget but should here be displayed as a readonly CharField instead\n last_modified_user_2 = forms.CharField(label=_(\"Last modified by\"), required=False, disabled=True)\n\n class Meta:\n model = Course\n fields = ('name_de', 'name_en', 'type', 'degrees', 'is_graded', 'is_required_for_reward', 'vote_start_date',\n 'vote_end_date', 'participants', 'general_questions', 'last_modified_time_2', 'last_modified_user_2', 'semester')\n localized_fields = ('vote_start_date', 'vote_end_date')\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n self.fields['general_questions'].queryset = Questionnaire.objects.filter(is_for_contributors=False).filter(\n Q(obsolete=False) | Q(contributions__course=self.instance)).distinct()\n\n if self.instance.general_contribution:\n self.fields['general_questions'].initial = [q.pk for q in self.instance.general_contribution.questionnaires.all()]\n\n self.fields['last_modified_time_2'].initial = self.instance.last_modified_time\n if self.instance.last_modified_user:\n self.fields['last_modified_user_2'].initial = self.instance.last_modified_user.full_name\n\n if self.instance.state in ['inEvaluation', 'evaluated', 'reviewed']:\n self.fields['vote_start_date'].disabled = True\n\n if not self.instance.can_staff_edit:\n # form is used as read-only course view\n disable_all_fields(self)\n\n def clean(self):\n super().clean()\n vote_start_date = self.cleaned_data.get('vote_start_date')\n vote_end_date = self.cleaned_data.get('vote_end_date')\n if vote_start_date and vote_end_date:\n if vote_start_date >= vote_end_date:\n raise ValidationError(_(\"The first day of evaluation must be before the last one.\"))\n\n def save(self, user, *args, **kw):\n self.instance.last_modified_user = user\n super().save(*args, **kw)\n self.instance.general_contribution.questionnaires = self.cleaned_data.get('general_questions')\n logger.info('Course \"{}\" (id {}) was edited by staff member {}.'.format(self.instance, self.instance.id, user.username))\n\n\nclass SingleResultForm(forms.ModelForm, BootstrapMixin):\n semester = forms.ModelChoiceField(Semester.objects.all(), disabled=True, required=False, widget=forms.HiddenInput())\n last_modified_time_2 = forms.DateTimeField(label=_(\"Last modified\"), required=False, localize=True, disabled=True)\n last_modified_user_2 = forms.CharField(label=_(\"Last modified by\"), required=False, disabled=True)\n event_date = forms.DateField(label=_(\"Event date\"), localize=True)\n responsible = forms.ModelChoiceField(label=_(\"Responsible\"), queryset=UserProfile.objects.all())\n answer_1 = forms.IntegerField(label=_(\"# very good\"), initial=0)\n answer_2 = forms.IntegerField(label=_(\"# good\"), initial=0)\n answer_3 = forms.IntegerField(label=_(\"# neutral\"), initial=0)\n answer_4 = forms.IntegerField(label=_(\"# bad\"), initial=0)\n answer_5 = forms.IntegerField(label=_(\"# very bad\"), initial=0)\n\n class Meta:\n model = Course\n fields = ('name_de', 'name_en', 'type', 'degrees', 'event_date', 'responsible', 'answer_1', 'answer_2', 'answer_3', 'answer_4', 'answer_5',\n 'last_modified_time_2', 'last_modified_user_2', 'semester')\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n self.fields['last_modified_time_2'].initial = 
self.instance.last_modified_time\n if self.instance.last_modified_user:\n self.fields['last_modified_user_2'].initial = self.instance.last_modified_user.full_name\n\n if self.instance.vote_start_date:\n self.fields['event_date'].initial = self.instance.vote_start_date\n\n if not self.instance.can_staff_edit:\n disable_all_fields(self)\n\n if self.instance.pk:\n self.fields['responsible'].initial = self.instance.responsible_contributor\n answer_counts = dict()\n for answer_counter in self.instance.ratinganswer_counters:\n answer_counts[answer_counter.answer] = answer_counter.count\n for i in range(1,6):\n self.fields['answer_' + str(i)].initial = answer_counts[i]\n\n def save(self, *args, **kw):\n user = kw.pop(\"user\")\n self.instance.last_modified_user = user\n self.instance.vote_start_date = self.cleaned_data['event_date']\n self.instance.vote_end_date = self.cleaned_data['event_date']\n self.instance.is_graded = False\n super().save(*args, **kw)\n\n single_result_questionnaire = Questionnaire.get_single_result_questionnaire()\n single_result_question = single_result_questionnaire.question_set.first()\n\n if not Contribution.objects.filter(course=self.instance, responsible=True).exists():\n contribution = Contribution(course=self.instance, contributor=self.cleaned_data['responsible'], responsible=True)\n contribution.save()\n contribution.questionnaires.add(single_result_questionnaire)\n\n # set answers\n contribution = Contribution.objects.get(course=self.instance, responsible=True)\n total_votes = 0\n for i in range(1,6):\n count = self.cleaned_data['answer_'+str(i)]\n total_votes += count\n RatingAnswerCounter.objects.update_or_create(contribution=contribution, question=single_result_question, answer=i, defaults={'count': count})\n self.instance._participant_count = total_votes\n self.instance._voter_count = total_votes\n\n # change state to \"reviewed\"\n # works only for single_results so the course and its contribution must be saved first\n self.instance.single_result_created()\n self.instance.save()\n\n\nclass ContributionForm(forms.ModelForm, BootstrapMixin):\n responsibility = forms.ChoiceField(widget=forms.RadioSelect(), choices=Contribution.RESPONSIBILITY_CHOICES)\n course = forms.ModelChoiceField(Course.objects.all(), disabled=True, required=False, widget=forms.HiddenInput())\n questionnaires = QuestionnaireMultipleChoiceField(Questionnaire.objects.filter(is_for_contributors=True, obsolete=False), label=_(\"Questionnaires\"))\n\n class Meta:\n model = Contribution\n fields = ('course', 'contributor', 'questionnaires', 'order', 'responsibility', 'comment_visibility', 'label')\n widgets = {'order': forms.HiddenInput(), 'comment_visibility': forms.RadioSelect(choices=Contribution.COMMENT_VISIBILITY_CHOICES)}\n\n def __init__(self, *args, **kwargs):\n # work around https://code.djangoproject.com/ticket/25880\n self.course = kwargs.pop('course', None)\n if self.course is None:\n assert 'instance' in kwargs\n self.course = kwargs['instance'].course\n\n super().__init__(*args, **kwargs)\n\n self.fields['contributor'].widget.attrs['class'] = 'form-control'\n self.fields['label'].widget.attrs['class'] = 'form-control'\n\n if self.instance.responsible:\n self.fields['responsibility'].initial = Contribution.IS_RESPONSIBLE\n elif self.instance.can_edit:\n self.fields['responsibility'].initial = Contribution.IS_EDITOR\n else:\n self.fields['responsibility'].initial = Contribution.IS_CONTRIBUTOR\n\n self.fields['questionnaires'].queryset = 
Questionnaire.objects.filter(is_for_contributors=True).filter(\n Q(obsolete=False) | Q(contributions__course=self.course)).distinct()\n\n if not self.course.can_staff_edit:\n # form is used as read-only course view\n disable_all_fields(self)\n\n def save(self, *args, **kwargs):\n responsibility = self.cleaned_data['responsibility']\n is_responsible = responsibility == Contribution.IS_RESPONSIBLE\n is_editor = responsibility == Contribution.IS_EDITOR\n self.instance.responsible = is_responsible\n self.instance.can_edit = is_responsible or is_editor\n if is_responsible:\n self.instance.comment_visibility = Contribution.ALL_COMMENTS\n return super().save(*args, **kwargs)\n\n\nclass CourseEmailForm(forms.Form, BootstrapMixin):\n recipients = forms.MultipleChoiceField(widget=forms.CheckboxSelectMultiple(), choices=EmailTemplate.EMAIL_RECIPIENTS, label=_(\"Send email to\"))\n subject = forms.CharField(label=_(\"Subject\"))\n body = forms.CharField(widget=forms.Textarea(), label=_(\"Message\"))\n\n def __init__(self, *args, **kwargs):\n self.instance = kwargs.pop('instance')\n self.export = kwargs.pop('export', False)\n self.template = EmailTemplate()\n super().__init__(*args, **kwargs)\n self.fields['subject'].required = not self.export\n self.fields['body'].required = not self.export\n\n def clean(self):\n self.recipient_groups = self.cleaned_data.get('recipients')\n\n if not self.recipient_groups:\n raise forms.ValidationError(_(\"No recipient selected. Choose at least one group of recipients.\"))\n\n return self.cleaned_data\n\n # returns the number of recipients without an email address\n def missing_email_addresses(self):\n recipients = self.template.recipient_list_for_course(self.instance, self.recipient_groups)\n return len([user for user in recipients if not user.email])\n\n def email_addresses(self):\n if self.recipient_groups is None:\n return []\n recipients = self.template.recipient_list_for_course(self.instance, self.recipient_groups)\n return set(user.email for user in recipients if user.email)\n\n def send(self):\n self.template.subject = self.cleaned_data.get('subject')\n self.template.body = self.cleaned_data.get('body')\n EmailTemplate.send_to_users_in_courses(self.template, [self.instance], self.recipient_groups, use_cc=True)\n\n\nclass QuestionnaireForm(forms.ModelForm, BootstrapMixin):\n\n class Meta:\n model = Questionnaire\n exclude = ()\n widgets = {'index': forms.HiddenInput()}\n\n\nclass AtLeastOneFormSet(BaseInlineFormSet):\n def clean(self):\n super().clean()\n count = 0\n for form in self.forms:\n if form.cleaned_data and not form.cleaned_data.get('DELETE', False):\n count += 1\n\n if count < 1:\n raise forms.ValidationError(_('You must have at least one of these.'))\n\n\nclass ContributionFormSet(AtLeastOneFormSet):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.queryset = self.instance.contributions.exclude(contributor=None)\n\n def handle_deleted_and_added_contributions(self):\n \"\"\"\n If a contributor got removed and added in the same formset, django would usually complain\n when validating the added form, as it does not check whether the existing contribution was deleted.\n This method works around that.\n \"\"\"\n for form_with_errors in self.forms:\n if not form_with_errors.errors:\n continue\n for deleted_form in self.forms:\n if not deleted_form.cleaned_data or not deleted_form.cleaned_data.get('DELETE'):\n continue\n if not deleted_form.cleaned_data['contributor'] == form_with_errors.cleaned_data['contributor']:\n 
continue\n form_with_errors.cleaned_data['id'] = deleted_form.cleaned_data['id']\n form_with_errors.instance = deleted_form.instance\n # we modified the form, so we have to force re-validation\n form_with_errors.full_clean()\n\n def clean(self):\n self.handle_deleted_and_added_contributions()\n\n super().clean()\n\n found_contributor = set()\n count_responsible = 0\n for form in self.forms:\n if not form.cleaned_data or form.cleaned_data.get('DELETE'):\n continue\n contributor = form.cleaned_data.get('contributor')\n if contributor is None:\n raise forms.ValidationError(_('Please select the name of each added contributor. Remove empty rows if necessary.'))\n if contributor and contributor in found_contributor:\n raise forms.ValidationError(_('Duplicate contributor found. Each contributor should only be used once.'))\n elif contributor:\n found_contributor.add(contributor)\n\n if form.cleaned_data.get('responsibility') == 'RESPONSIBLE':\n count_responsible += 1\n\n if count_responsible < 1:\n raise forms.ValidationError(_('No responsible contributor found. Each course must have exactly one responsible contributor.'))\n elif count_responsible > 1:\n raise forms.ValidationError(_('Too many responsible contributors found. Each course must have exactly one responsible contributor.'))\n\n\nclass QuestionForm(forms.ModelForm):\n class Meta:\n model = Question\n fields = \"__all__\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.fields['text_de'].widget = forms.TextInput(attrs={'class':'form-control'})\n self.fields['text_en'].widget = forms.TextInput(attrs={'class':'form-control'})\n self.fields['type'].widget.attrs['class'] = 'form-control'\n\n\nclass QuestionnairesAssignForm(forms.Form, BootstrapMixin):\n def __init__(self, *args, **kwargs):\n course_types = kwargs.pop('course_types')\n super().__init__(*args, **kwargs)\n\n for course_type in course_types:\n self.fields[course_type.name] = ToolTipModelMultipleChoiceField(required=False, queryset=Questionnaire.objects.filter(obsolete=False, is_for_contributors=False))\n self.fields['Responsible contributor'] = ToolTipModelMultipleChoiceField(label=_('Responsible contributor'), required=False, queryset=Questionnaire.objects.filter(obsolete=False, is_for_contributors=True))\n\n\nclass UserForm(forms.ModelForm, BootstrapMixin):\n is_staff = forms.BooleanField(required=False, label=_(\"Staff user\"))\n is_grade_user = forms.BooleanField(required=False, label=_(\"Grade user\"))\n courses_participating_in = forms.ModelMultipleChoiceField(None, required=False, label=_(\"Courses participating in (active semester)\"))\n\n class Meta:\n model = UserProfile\n fields = ('username', 'title', 'first_name', 'last_name', 'email', 'delegates', 'cc_users')\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n courses_in_active_semester = Course.objects.filter(semester=Semester.active_semester())\n excludes = [x.id for x in courses_in_active_semester if x.is_single_result()]\n courses_in_active_semester = courses_in_active_semester.exclude(id__in=excludes)\n self.fields['courses_participating_in'].queryset = courses_in_active_semester\n if self.instance.pk:\n self.fields['courses_participating_in'].initial = courses_in_active_semester.filter(participants=self.instance)\n self.fields['is_staff'].initial = self.instance.is_staff\n self.fields['is_grade_user'].initial = self.instance.is_grade_publisher\n\n def clean_username(self):\n username = self.cleaned_data.get('username')\n user_with_same_name = 
UserProfile.objects.filter(username__iexact=username)\n\n # make sure we don't take the instance itself into account\n if self.instance and self.instance.pk:\n user_with_same_name = user_with_same_name.exclude(pk=self.instance.pk)\n\n if user_with_same_name.exists():\n raise forms.ValidationError(_(\"A user with the username '%s' already exists\") % username)\n return username.lower()\n\n def clean_email(self):\n email = self.cleaned_data.get('email')\n user_with_same_email = UserProfile.objects.filter(email__iexact=email)\n\n # make sure we don't take the instance itself into account\n if self.instance and self.instance.pk:\n user_with_same_email = user_with_same_email.exclude(pk=self.instance.pk)\n\n if user_with_same_email.exists():\n raise forms.ValidationError(_(\"A user with the email '%s' already exists\") % email)\n return email.lower()\n\n def save(self, *args, **kw):\n super().save(*args, **kw)\n self.instance.courses_participating_in = list(self.instance.courses_participating_in.exclude(semester=Semester.active_semester())) + list(self.cleaned_data.get('courses_participating_in'))\n\n staff_group = Group.objects.get(name=\"Staff\")\n grade_user_group = Group.objects.get(name=\"Grade publisher\")\n if self.cleaned_data.get('is_staff'):\n self.instance.groups.add(staff_group)\n else:\n self.instance.groups.remove(staff_group)\n\n if self.cleaned_data.get('is_grade_user'):\n self.instance.groups.add(grade_user_group)\n else:\n self.instance.groups.remove(grade_user_group)\n\n\nclass UserMergeSelectionForm(forms.Form, BootstrapMixin):\n main_user = forms.ModelChoiceField(UserProfile.objects.all())\n other_user = forms.ModelChoiceField(UserProfile.objects.all())\n\n\nclass LotteryForm(forms.Form, BootstrapMixin):\n number_of_winners = forms.IntegerField(label=_(\"Number of Winners\"), initial=3)\n\n\nclass EmailTemplateForm(forms.ModelForm, BootstrapMixin):\n class Meta:\n model = EmailTemplate\n exclude = (\"name\", )\n\n\nclass FaqSectionForm(forms.ModelForm, BootstrapMixin):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n self.fields[\"title_de\"].widget = forms.TextInput(attrs={'class': 'form-control'})\n self.fields[\"title_en\"].widget = forms.TextInput(attrs={'class': 'form-control'})\n self.fields[\"order\"].widget = forms.HiddenInput()\n\n class Meta:\n model = FaqSection\n exclude = ()\n\n\nclass FaqQuestionForm(forms.ModelForm, BootstrapMixin):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n self.fields[\"question_de\"].widget = forms.TextInput(attrs={'class': 'form-control'})\n self.fields[\"question_en\"].widget = forms.TextInput(attrs={'class': 'form-control'})\n self.fields[\"answer_de\"].widget.attrs['class'] = 'form-control'\n self.fields[\"answer_en\"].widget.attrs['class'] = 'form-control'\n self.fields[\"order\"].widget = forms.HiddenInput()\n\n class Meta:\n model = FaqQuestion\n exclude = (\"section\",)\n\n\nclass TextAnswerForm(forms.ModelForm, BootstrapMixin):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.fields['original_answer'].disabled = \"True\"\n\n class Meta:\n model = TextAnswer\n fields = (\"original_answer\", \"reviewed_answer\",)\n\n def clean_reviewed_answer(self):\n reviewed_answer = normalize_newlines(self.cleaned_data.get('reviewed_answer'))\n if reviewed_answer == normalize_newlines(self.instance.original_answer) or reviewed_answer == '':\n return None\n return reviewed_answer\n\n\nclass ExportSheetForm(forms.Form, BootstrapMixin):\n def 
__init__(self, semester, *args, **kwargs):\n super(ExportSheetForm, self).__init__(*args, **kwargs)\n course_types = CourseType.objects.filter(courses__semester=semester).distinct()\n course_type_tuples = [(ct.pk, ct.name) for ct in course_types]\n self.fields['selected_course_types'] = forms.MultipleChoiceField(\n choices=course_type_tuples,\n required=True,\n widget=forms.CheckboxSelectMultiple(),\n label=_(\"Course types\")\n )\n",
"path": "evap/staff/forms.py"
}
] | [
{
"content": "from django import forms\nfrom django.db.models import Q\nfrom django.core.exceptions import SuspiciousOperation\nfrom django.forms.models import BaseInlineFormSet\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.utils.text import normalize_newlines\nfrom django.core.exceptions import ValidationError\nfrom django.contrib.auth.models import Group\n\nfrom evap.evaluation.forms import BootstrapMixin, QuestionnaireMultipleChoiceField\nfrom evap.evaluation.models import Contribution, Course, Question, Questionnaire, Semester, UserProfile, FaqSection, \\\n FaqQuestion, EmailTemplate, TextAnswer, Degree, RatingAnswerCounter, CourseType\nfrom evap.staff.fields import ToolTipModelMultipleChoiceField\n\nimport logging\n\nlogger = logging.getLogger(__name__)\n\n\ndef disable_all_fields(form):\n for field in form.fields.values():\n field.disabled = True\n\n\nclass ImportForm(forms.Form, BootstrapMixin):\n vote_start_date = forms.DateField(label=_(\"First day of evaluation\"), localize=True)\n vote_end_date = forms.DateField(label=_(\"Last day of evaluation\"), localize=True)\n\n excel_file = forms.FileField(label=_(\"Excel file\"))\n\n\nclass UserImportForm(forms.Form, BootstrapMixin):\n excel_file = forms.FileField(label=_(\"Excel file\"))\n\n\nclass UserBulkDeleteForm(forms.Form, BootstrapMixin):\n username_file = forms.FileField(label=_(\"Username file\"))\n\n\nclass SemesterForm(forms.ModelForm, BootstrapMixin):\n class Meta:\n model = Semester\n fields = (\"name_de\", \"name_en\")\n\n\nclass DegreeForm(forms.ModelForm, BootstrapMixin):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n self.fields[\"name_de\"].widget = forms.TextInput(attrs={'class': 'form-control'})\n self.fields[\"name_en\"].widget = forms.TextInput(attrs={'class': 'form-control'})\n self.fields[\"order\"].widget = forms.HiddenInput()\n\n class Meta:\n model = Degree\n fields = \"__all__\"\n\n def clean(self):\n super().clean()\n if self.cleaned_data.get('DELETE') and not self.instance.can_staff_delete:\n raise SuspiciousOperation(\"Deleting degree not allowed\")\n\n\nclass CourseTypeForm(forms.ModelForm, BootstrapMixin):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n self.fields[\"name_de\"].widget = forms.TextInput(attrs={'class': 'form-control'})\n self.fields[\"name_en\"].widget = forms.TextInput(attrs={'class': 'form-control'})\n\n class Meta:\n model = CourseType\n fields = \"__all__\"\n\n def clean(self):\n super().clean()\n if self.cleaned_data.get('DELETE') and not self.instance.can_staff_delete:\n raise SuspiciousOperation(\"Deleting course type not allowed\")\n\n\nclass CourseTypeMergeSelectionForm(forms.Form, BootstrapMixin):\n main_type = forms.ModelChoiceField(CourseType.objects.all())\n other_type = forms.ModelChoiceField(CourseType.objects.all())\n\n def clean(self):\n super().clean()\n if self.cleaned_data.get('main_type') == self.cleaned_data.get('other_type'):\n raise ValidationError(_(\"You must select two different course types.\"))\n\n\nclass CourseForm(forms.ModelForm, BootstrapMixin):\n general_questions = QuestionnaireMultipleChoiceField(Questionnaire.objects.filter(is_for_contributors=False, obsolete=False), label=_(\"General questions\"))\n semester = forms.ModelChoiceField(Semester.objects.all(), disabled=True, required=False, widget=forms.HiddenInput())\n\n # the following field is needed, because the auto_now=True for last_modified_time makes the corresponding field\n # uneditable and so it can't be 
displayed in the model form\n # see https://docs.djangoproject.com/en/dev/ref/models/fields/#datefield for details\n last_modified_time_2 = forms.DateTimeField(label=_(\"Last modified\"), required=False, localize=True, disabled=True)\n # last_modified_user would usually get a select widget but should here be displayed as a readonly CharField instead\n last_modified_user_2 = forms.CharField(label=_(\"Last modified by\"), required=False, disabled=True)\n\n class Meta:\n model = Course\n fields = ('name_de', 'name_en', 'type', 'degrees', 'is_graded', 'is_required_for_reward', 'vote_start_date',\n 'vote_end_date', 'participants', 'general_questions', 'last_modified_time_2', 'last_modified_user_2', 'semester')\n localized_fields = ('vote_start_date', 'vote_end_date')\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n self.fields['general_questions'].queryset = Questionnaire.objects.filter(is_for_contributors=False).filter(\n Q(obsolete=False) | Q(contributions__course=self.instance)).distinct()\n\n if self.instance.general_contribution:\n self.fields['general_questions'].initial = [q.pk for q in self.instance.general_contribution.questionnaires.all()]\n\n self.fields['last_modified_time_2'].initial = self.instance.last_modified_time\n if self.instance.last_modified_user:\n self.fields['last_modified_user_2'].initial = self.instance.last_modified_user.full_name\n\n if self.instance.state in ['inEvaluation', 'evaluated', 'reviewed']:\n self.fields['vote_start_date'].disabled = True\n\n if not self.instance.can_staff_edit:\n # form is used as read-only course view\n disable_all_fields(self)\n\n def clean(self):\n super().clean()\n vote_start_date = self.cleaned_data.get('vote_start_date')\n vote_end_date = self.cleaned_data.get('vote_end_date')\n if vote_start_date and vote_end_date:\n if vote_start_date >= vote_end_date:\n raise ValidationError(_(\"The first day of evaluation must be before the last one.\"))\n\n def save(self, user, *args, **kw):\n self.instance.last_modified_user = user\n super().save(*args, **kw)\n self.instance.general_contribution.questionnaires = self.cleaned_data.get('general_questions')\n logger.info('Course \"{}\" (id {}) was edited by staff member {}.'.format(self.instance, self.instance.id, user.username))\n\n\nclass SingleResultForm(forms.ModelForm, BootstrapMixin):\n semester = forms.ModelChoiceField(Semester.objects.all(), disabled=True, required=False, widget=forms.HiddenInput())\n last_modified_time_2 = forms.DateTimeField(label=_(\"Last modified\"), required=False, localize=True, disabled=True)\n last_modified_user_2 = forms.CharField(label=_(\"Last modified by\"), required=False, disabled=True)\n event_date = forms.DateField(label=_(\"Event date\"), localize=True)\n responsible = forms.ModelChoiceField(label=_(\"Responsible\"), queryset=UserProfile.objects.all())\n answer_1 = forms.IntegerField(label=_(\"# very good\"), initial=0)\n answer_2 = forms.IntegerField(label=_(\"# good\"), initial=0)\n answer_3 = forms.IntegerField(label=_(\"# neutral\"), initial=0)\n answer_4 = forms.IntegerField(label=_(\"# bad\"), initial=0)\n answer_5 = forms.IntegerField(label=_(\"# very bad\"), initial=0)\n\n class Meta:\n model = Course\n fields = ('name_de', 'name_en', 'type', 'degrees', 'event_date', 'responsible', 'answer_1', 'answer_2', 'answer_3', 'answer_4', 'answer_5',\n 'last_modified_time_2', 'last_modified_user_2', 'semester')\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n 
self.fields['last_modified_time_2'].initial = self.instance.last_modified_time\n if self.instance.last_modified_user:\n self.fields['last_modified_user_2'].initial = self.instance.last_modified_user.full_name\n\n if self.instance.vote_start_date:\n self.fields['event_date'].initial = self.instance.vote_start_date\n\n if not self.instance.can_staff_edit:\n disable_all_fields(self)\n\n if self.instance.pk:\n self.fields['responsible'].initial = self.instance.responsible_contributor\n answer_counts = dict()\n for answer_counter in self.instance.ratinganswer_counters:\n answer_counts[answer_counter.answer] = answer_counter.count\n for i in range(1,6):\n self.fields['answer_' + str(i)].initial = answer_counts[i]\n\n def save(self, *args, **kw):\n user = kw.pop(\"user\")\n self.instance.last_modified_user = user\n self.instance.vote_start_date = self.cleaned_data['event_date']\n self.instance.vote_end_date = self.cleaned_data['event_date']\n self.instance.is_graded = False\n super().save(*args, **kw)\n\n single_result_questionnaire = Questionnaire.get_single_result_questionnaire()\n single_result_question = single_result_questionnaire.question_set.first()\n\n if not Contribution.objects.filter(course=self.instance, responsible=True).exists():\n contribution = Contribution(course=self.instance, contributor=self.cleaned_data['responsible'], responsible=True)\n contribution.save()\n contribution.questionnaires.add(single_result_questionnaire)\n\n # set answers\n contribution = Contribution.objects.get(course=self.instance, responsible=True)\n total_votes = 0\n for i in range(1,6):\n count = self.cleaned_data['answer_'+str(i)]\n total_votes += count\n RatingAnswerCounter.objects.update_or_create(contribution=contribution, question=single_result_question, answer=i, defaults={'count': count})\n self.instance._participant_count = total_votes\n self.instance._voter_count = total_votes\n\n # change state to \"reviewed\"\n # works only for single_results so the course and its contribution must be saved first\n self.instance.single_result_created()\n self.instance.save()\n\n\nclass ContributionForm(forms.ModelForm, BootstrapMixin):\n responsibility = forms.ChoiceField(widget=forms.RadioSelect(), choices=Contribution.RESPONSIBILITY_CHOICES)\n course = forms.ModelChoiceField(Course.objects.all(), disabled=True, required=False, widget=forms.HiddenInput())\n questionnaires = QuestionnaireMultipleChoiceField(Questionnaire.objects.filter(is_for_contributors=True, obsolete=False), label=_(\"Questionnaires\"))\n\n class Meta:\n model = Contribution\n fields = ('course', 'contributor', 'questionnaires', 'order', 'responsibility', 'comment_visibility', 'label')\n widgets = {'order': forms.HiddenInput(), 'comment_visibility': forms.RadioSelect(choices=Contribution.COMMENT_VISIBILITY_CHOICES)}\n\n def __init__(self, *args, **kwargs):\n # work around https://code.djangoproject.com/ticket/25880\n self.course = kwargs.pop('course', None)\n if self.course is None:\n assert 'instance' in kwargs\n self.course = kwargs['instance'].course\n\n super().__init__(*args, **kwargs)\n\n self.fields['contributor'].widget.attrs['class'] = 'form-control'\n self.fields['label'].widget.attrs['class'] = 'form-control'\n\n if self.instance.responsible:\n self.fields['responsibility'].initial = Contribution.IS_RESPONSIBLE\n elif self.instance.can_edit:\n self.fields['responsibility'].initial = Contribution.IS_EDITOR\n else:\n self.fields['responsibility'].initial = Contribution.IS_CONTRIBUTOR\n\n self.fields['questionnaires'].queryset = 
Questionnaire.objects.filter(is_for_contributors=True).filter(\n Q(obsolete=False) | Q(contributions__course=self.course)).distinct()\n\n if not self.course.can_staff_edit:\n # form is used as read-only course view\n disable_all_fields(self)\n\n def save(self, *args, **kwargs):\n responsibility = self.cleaned_data['responsibility']\n is_responsible = responsibility == Contribution.IS_RESPONSIBLE\n is_editor = responsibility == Contribution.IS_EDITOR\n self.instance.responsible = is_responsible\n self.instance.can_edit = is_responsible or is_editor\n if is_responsible:\n self.instance.comment_visibility = Contribution.ALL_COMMENTS\n return super().save(*args, **kwargs)\n\n\nclass CourseEmailForm(forms.Form, BootstrapMixin):\n recipients = forms.MultipleChoiceField(widget=forms.CheckboxSelectMultiple(), choices=EmailTemplate.EMAIL_RECIPIENTS, label=_(\"Send email to\"))\n subject = forms.CharField(label=_(\"Subject\"))\n body = forms.CharField(widget=forms.Textarea(), label=_(\"Message\"))\n\n def __init__(self, *args, **kwargs):\n self.instance = kwargs.pop('instance')\n self.export = kwargs.pop('export', False)\n self.template = EmailTemplate()\n super().__init__(*args, **kwargs)\n self.fields['subject'].required = not self.export\n self.fields['body'].required = not self.export\n\n def clean(self):\n self.recipient_groups = self.cleaned_data.get('recipients')\n\n if not self.recipient_groups:\n raise forms.ValidationError(_(\"No recipient selected. Choose at least one group of recipients.\"))\n\n return self.cleaned_data\n\n # returns the number of recipients without an email address\n def missing_email_addresses(self):\n recipients = self.template.recipient_list_for_course(self.instance, self.recipient_groups)\n return len([user for user in recipients if not user.email])\n\n def email_addresses(self):\n if self.recipient_groups is None:\n return []\n recipients = self.template.recipient_list_for_course(self.instance, self.recipient_groups)\n return set(user.email for user in recipients if user.email)\n\n def send(self):\n self.template.subject = self.cleaned_data.get('subject')\n self.template.body = self.cleaned_data.get('body')\n EmailTemplate.send_to_users_in_courses(self.template, [self.instance], self.recipient_groups, use_cc=True)\n\n\nclass QuestionnaireForm(forms.ModelForm, BootstrapMixin):\n\n class Meta:\n model = Questionnaire\n exclude = ()\n widgets = {'index': forms.HiddenInput()}\n\n\nclass AtLeastOneFormSet(BaseInlineFormSet):\n def clean(self):\n super().clean()\n count = 0\n for form in self.forms:\n if form.cleaned_data and not form.cleaned_data.get('DELETE', False):\n count += 1\n\n if count < 1:\n raise forms.ValidationError(_('You must have at least one of these.'))\n\n\nclass ContributionFormSet(AtLeastOneFormSet):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.queryset = self.instance.contributions.exclude(contributor=None)\n\n def handle_deleted_and_added_contributions(self):\n \"\"\"\n If a contributor got removed and added in the same formset, django would usually complain\n when validating the added form, as it does not check whether the existing contribution was deleted.\n This method works around that.\n \"\"\"\n for form_with_errors in self.forms:\n if not form_with_errors.errors:\n continue\n for deleted_form in self.forms:\n if not deleted_form.cleaned_data or not deleted_form.cleaned_data.get('DELETE'):\n continue\n if not deleted_form.cleaned_data['contributor'] == form_with_errors.cleaned_data['contributor']:\n 
continue\n form_with_errors.cleaned_data['id'] = deleted_form.cleaned_data['id']\n form_with_errors.instance = deleted_form.instance\n # we modified the form, so we have to force re-validation\n form_with_errors.full_clean()\n\n def clean(self):\n self.handle_deleted_and_added_contributions()\n\n super().clean()\n\n found_contributor = set()\n count_responsible = 0\n for form in self.forms:\n if not form.cleaned_data or form.cleaned_data.get('DELETE'):\n continue\n contributor = form.cleaned_data.get('contributor')\n if contributor is None:\n raise forms.ValidationError(_('Please select the name of each added contributor. Remove empty rows if necessary.'))\n if contributor and contributor in found_contributor:\n raise forms.ValidationError(_('Duplicate contributor found. Each contributor should only be used once.'))\n elif contributor:\n found_contributor.add(contributor)\n\n if form.cleaned_data.get('responsibility') == 'RESPONSIBLE':\n count_responsible += 1\n\n if count_responsible < 1:\n raise forms.ValidationError(_('No responsible contributor found. Each course must have exactly one responsible contributor.'))\n elif count_responsible > 1:\n raise forms.ValidationError(_('Too many responsible contributors found. Each course must have exactly one responsible contributor.'))\n\n\nclass QuestionForm(forms.ModelForm):\n class Meta:\n model = Question\n fields = \"__all__\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.fields['text_de'].widget = forms.TextInput(attrs={'class':'form-control'})\n self.fields['text_en'].widget = forms.TextInput(attrs={'class':'form-control'})\n self.fields['type'].widget.attrs['class'] = 'form-control'\n\n\nclass QuestionnairesAssignForm(forms.Form, BootstrapMixin):\n def __init__(self, *args, **kwargs):\n course_types = kwargs.pop('course_types')\n super().__init__(*args, **kwargs)\n\n for course_type in course_types:\n self.fields[course_type.name] = ToolTipModelMultipleChoiceField(required=False, queryset=Questionnaire.objects.filter(obsolete=False, is_for_contributors=False))\n self.fields['Responsible contributor'] = ToolTipModelMultipleChoiceField(label=_('Responsible contributor'), required=False, queryset=Questionnaire.objects.filter(obsolete=False, is_for_contributors=True))\n\n\nclass UserForm(forms.ModelForm, BootstrapMixin):\n is_staff = forms.BooleanField(required=False, label=_(\"Staff user\"))\n is_grade_user = forms.BooleanField(required=False, label=_(\"Grade user\"))\n courses_participating_in = forms.ModelMultipleChoiceField(None, required=False, label=_(\"Courses participating in (active semester)\"))\n\n class Meta:\n model = UserProfile\n fields = ('username', 'title', 'first_name', 'last_name', 'email', 'delegates', 'cc_users')\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n courses_in_active_semester = Course.objects.filter(semester=Semester.active_semester())\n excludes = [x.id for x in courses_in_active_semester if x.is_single_result()]\n courses_in_active_semester = courses_in_active_semester.exclude(id__in=excludes)\n self.fields['courses_participating_in'].queryset = courses_in_active_semester\n if self.instance.pk:\n self.fields['courses_participating_in'].initial = courses_in_active_semester.filter(participants=self.instance)\n self.fields['is_staff'].initial = self.instance.is_staff\n self.fields['is_grade_user'].initial = self.instance.is_grade_publisher\n\n def clean_username(self):\n username = self.cleaned_data.get('username')\n user_with_same_name = 
UserProfile.objects.filter(username__iexact=username)\n\n # make sure we don't take the instance itself into account\n if self.instance and self.instance.pk:\n user_with_same_name = user_with_same_name.exclude(pk=self.instance.pk)\n\n if user_with_same_name.exists():\n raise forms.ValidationError(_(\"A user with the username '%s' already exists\") % username)\n return username.lower()\n\n def clean_email(self):\n email = self.cleaned_data.get('email')\n user_with_same_email = UserProfile.objects.filter(email__iexact=email)\n\n # make sure we don't take the instance itself into account\n if self.instance and self.instance.pk:\n user_with_same_email = user_with_same_email.exclude(pk=self.instance.pk)\n\n if user_with_same_email.exists():\n raise forms.ValidationError(_(\"A user with the email '%s' already exists\") % email)\n return email.lower()\n\n def save(self, *args, **kw):\n super().save(*args, **kw)\n self.instance.courses_participating_in = list(self.instance.courses_participating_in.exclude(semester=Semester.active_semester())) + list(self.cleaned_data.get('courses_participating_in'))\n\n staff_group = Group.objects.get(name=\"Staff\")\n grade_user_group = Group.objects.get(name=\"Grade publisher\")\n if self.cleaned_data.get('is_staff'):\n self.instance.groups.add(staff_group)\n else:\n self.instance.groups.remove(staff_group)\n\n if self.cleaned_data.get('is_grade_user'):\n self.instance.groups.add(grade_user_group)\n else:\n self.instance.groups.remove(grade_user_group)\n\n\nclass UserMergeSelectionForm(forms.Form, BootstrapMixin):\n main_user = forms.ModelChoiceField(UserProfile.objects.all())\n other_user = forms.ModelChoiceField(UserProfile.objects.all())\n\n\nclass LotteryForm(forms.Form, BootstrapMixin):\n number_of_winners = forms.IntegerField(label=_(\"Number of Winners\"), initial=3)\n\n\nclass EmailTemplateForm(forms.ModelForm, BootstrapMixin):\n class Meta:\n model = EmailTemplate\n exclude = (\"name\", )\n\n\nclass FaqSectionForm(forms.ModelForm, BootstrapMixin):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n self.fields[\"title_de\"].widget = forms.TextInput(attrs={'class': 'form-control'})\n self.fields[\"title_en\"].widget = forms.TextInput(attrs={'class': 'form-control'})\n self.fields[\"order\"].widget = forms.HiddenInput()\n\n class Meta:\n model = FaqSection\n exclude = ()\n\n\nclass FaqQuestionForm(forms.ModelForm, BootstrapMixin):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n self.fields[\"question_de\"].widget = forms.TextInput(attrs={'class': 'form-control'})\n self.fields[\"question_en\"].widget = forms.TextInput(attrs={'class': 'form-control'})\n self.fields[\"answer_de\"].widget.attrs['class'] = 'form-control'\n self.fields[\"answer_en\"].widget.attrs['class'] = 'form-control'\n self.fields[\"order\"].widget = forms.HiddenInput()\n\n class Meta:\n model = FaqQuestion\n exclude = (\"section\",)\n\n\nclass TextAnswerForm(forms.ModelForm, BootstrapMixin):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.fields['original_answer'].disabled = \"True\"\n\n class Meta:\n model = TextAnswer\n fields = (\"original_answer\", \"reviewed_answer\",)\n\n def clean_reviewed_answer(self):\n reviewed_answer = normalize_newlines(self.cleaned_data.get('reviewed_answer'))\n if reviewed_answer == normalize_newlines(self.instance.original_answer) or reviewed_answer == '':\n return None\n return reviewed_answer\n\n\nclass ExportSheetForm(forms.Form, BootstrapMixin):\n def 
__init__(self, semester, *args, **kwargs):\n super(ExportSheetForm, self).__init__(*args, **kwargs)\n course_types = CourseType.objects.filter(courses__semester=semester).distinct()\n course_type_tuples = [(ct.pk, ct.name) for ct in course_types]\n self.fields['selected_course_types'] = forms.MultipleChoiceField(\n choices=course_type_tuples,\n required=True,\n widget=forms.CheckboxSelectMultiple(),\n label=_(\"Course types\")\n )\n",
"path": "evap/staff/forms.py"
}
] | diff --git a/evap/staff/forms.py b/evap/staff/forms.py
index 1f7300d092..9dbf48ca70 100644
--- a/evap/staff/forms.py
+++ b/evap/staff/forms.py
@@ -40,7 +40,7 @@ class UserBulkDeleteForm(forms.Form, BootstrapMixin):
class SemesterForm(forms.ModelForm, BootstrapMixin):
class Meta:
model = Semester
- fields = "__all__"
+ fields = ("name_de", "name_en")
class DegreeForm(forms.ModelForm, BootstrapMixin):
|
sunpy__sunpy-3380 | Add .shape attribute to TimeSeries
It would be useful if `TimeSeries` had a `.shape` attribute that returned a tuple `(nrows, ncols)`, similar to a numpy array or pandas DataFrame.
(P.S. I may have got rows and cols the wrong way round; this needs checking...)
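
A minimal sketch of the requested property, assuming it simply delegates to the underlying `pandas.DataFrame` (whose `.shape` is already ordered `(nrows, ncols)`) — which is what the fix in the diff below ends up doing on `GenericTimeSeries`:

```python
import pandas as pd


class GenericTimeSeries:
    """Trimmed-down stand-in for sunpy's GenericTimeSeries."""

    def __init__(self, data: pd.DataFrame):
        self.data = data

    @property
    def shape(self):
        # pandas.DataFrame.shape is (nrows, ncols), so delegating
        # gives the numpy/pandas-style ordering asked for above.
        return self.data.shape


df = pd.DataFrame({'intensity': [0.1, 0.2, 0.3]})
ts = GenericTimeSeries(df)
assert ts.shape == (3, 1)  # 3 rows, 1 column
```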
| [
{
"content": "\"\"\"\nThis module provies `sunpy.timeseries.GenericTimeSeries` which all other\n`sunpy.timeseries.TimeSeries` classes inherit from.\n\"\"\"\nimport copy\nimport warnings\nfrom collections import OrderedDict\n\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\nimport astropy\nimport astropy.units as u\nfrom astropy.table import Column, Table\n\nfrom sunpy import config\nfrom sunpy.time import TimeRange\nfrom sunpy.timeseries import TimeSeriesMetaData\nfrom sunpy.util.exceptions import SunpyUserWarning\nfrom sunpy.util.metadata import MetaDict\nfrom sunpy.visualization import peek_show\n\n# define and register a new unit, needed for RHESSI\ndet = u.def_unit('detector')\nu.add_enabled_units([det])\n\nTIME_FORMAT = config.get(\"general\", \"time_format\")\n\n__all__ = [\"GenericTimeSeries\"]\n\n\nclass GenericTimeSeries:\n \"\"\"\n A generic time series object.\n\n Parameters\n ----------\n data : `~pandas.DataFrame`\n A `pandas.DataFrame` representing one or more fields as a function of time.\n meta : `~sunpy.timeseries.metadata.TimeSeriesMetaData`, optional\n The metadata giving details about the time series data/instrument.\n Defaults to `None`.\n units : `dict`, optional\n A mapping from column names in ``data`` to the physical units of that column.\n Defaults to `None`.\n\n Attributes\n ----------\n data : `~pandas.DataFrame`\n A `pandas.DataFrame` representing one or more fields as a function of time.\n meta : `~sunpy.timeseries.metadata.TimeSeriesMetaData`\n The metadata giving details about the time series data/instrument.\n units : `dict`\n A mapping from column names in ``data`` to the physical units ofthat column.\n\n Examples\n --------\n >>> from sunpy.timeseries import TimeSeries\n >>> from sunpy.time import parse_time\n >>> from astropy.time import TimeDelta\n >>> import numpy as np\n >>> import pandas as pd\n >>> times = parse_time(\"now\") - TimeDelta(np.arange(24 * 60)*u.minute)\n >>> intensity = np.sin(np.arange(0, 12 * np.pi, step=(12 * np.pi) / (24 * 60)))\n >>> df = pd.DataFrame(intensity, index=times, columns=['intensity'])\n >>> ts = TimeSeries(df)\n >>> ts.peek() # doctest: +SKIP\n\n References\n ----------\n * `Pandas Documentation <https://pandas.pydata.org/pandas-docs/stable/>`_\n \"\"\"\n # Class attribute used to specify the source class of the TimeSeries.\n _source = None\n _registry = dict()\n\n def __init_subclass__(cls, **kwargs):\n \"\"\"\n An __init_subclass__ hook initializes all of the subclasses of a given\n class.\n\n So for each subclass, it will call this block of code on import.\n This replicates some metaclass magic without the need to be\n aware of metaclasses. Here we use this to register each subclass\n in a dict that has the `is_datasource_for` attribute. 
This is\n then passed into the TimeSeries Factory so we can register them.\n \"\"\"\n super().__init_subclass__(**kwargs)\n if hasattr(cls, 'is_datasource_for'):\n cls._registry[cls] = cls.is_datasource_for\n\n # kwargs are not used here but are passed in for sources.\n def __init__(self, data, meta=None, units=None, **kwargs):\n self.data = data\n tr = self.time_range\n # Check metadata input\n if meta is None:\n # No meta given, so default\n self.meta = TimeSeriesMetaData(MetaDict(), tr, list(self.data.columns.values))\n elif isinstance(meta, (dict, OrderedDict, MetaDict)):\n # Given the values for metadata (dict) and infer timerange and colnames from the data\n self.meta = TimeSeriesMetaData(meta, tr, list(self.data.columns.values))\n elif isinstance(meta, tuple):\n # Given the values all in a tuple\n self.meta = TimeSeriesMetaData(meta, tr, list(self.data.columns.values))\n else:\n # Should have a list of 3-tuples giving a complex metadata list.\n self.meta = meta\n\n if units is None:\n self.units = {}\n else:\n self.units = units\n\n # TODO: Fix this?\n # Validate input data\n # self._validate_meta()\n # self._validate_units()\n\n# #### Attribute definitions #### #\n\n @property\n def source(self):\n \"\"\"\n A string/object used to specify the source class of the TimeSeries.\n \"\"\"\n return self._source\n\n @property\n def columns(self):\n \"\"\"\n A list of all the names of the columns in the data.\n \"\"\"\n return list(self.data.columns.values)\n\n @property\n def index(self):\n \"\"\"\n The time index of the data.\n \"\"\"\n return self.data.index\n\n @property\n def time_range(self):\n \"\"\"\n The start and end times of the TimeSeries as a `~sunpy.time.TimeRange`.\n \"\"\"\n if len(self.data) > 0:\n return TimeRange(self.data.index.min(), self.data.index.max())\n else:\n return None\n\n# #### Data Access, Selection and Organisation Methods #### #\n\n def quantity(self, colname, **kwargs):\n \"\"\"\n Return a `~astropy.units.quantity.Quantity` for the given column.\n\n Parameters\n ----------\n colname : `str`\n The heading of the column you want to output.\n\n Returns\n -------\n `~astropy.units.quantity.Quantity`\n \"\"\"\n values = self.data[colname].values\n unit = self.units[colname]\n return u.Quantity(values, unit)\n\n def add_column(self, colname, quantity, unit=False, overwrite=True, **kwargs):\n \"\"\"\n Return a new `~sunpy.timeseries.TimeSeries` with the given column added\n or updated.\n\n Parameters\n ----------\n colname : `str`\n The heading of the column you want output.\n quantity : `~astropy.units.quantity.Quantity` or `~numpy.ndarray`\n The values to be placed within the column.\n If updating values only then a numpy array is permitted.\n overwrite : `bool`, optional\n Defaults to `True`, allowing the method to overwrite a column already present in the `~sunpy.timeseries.TimeSeries`.\n\n Returns\n -------\n `sunpy.timeseries.TimeSeries`\n A new `~sunpy.timeseries.TimeSeries`.\n \"\"\"\n # Get the expected units from the quantity if required\n if not unit and isinstance(quantity, astropy.units.quantity.Quantity):\n unit = quantity.unit\n elif not unit:\n unit = u.dimensionless_unscaled\n\n # Make a copy of all the TimeSeries components.\n data = copy.copy(self.data)\n meta = TimeSeriesMetaData(copy.copy(self.meta.metadata))\n units = copy.copy(self.units)\n\n # Add the unit to the units dictionary if already there.\n if not (colname in self.data.columns):\n units[colname] = unit\n\n # Convert the given quantity into values for given units if necessary.\n 
values = quantity\n if isinstance(values, astropy.units.quantity.Quantity) and overwrite:\n values = values.to(units[colname]).value\n\n # Update or add the data.\n if not (colname in self.data.columns) or overwrite:\n data[colname] = values\n\n # Return a new TimeSeries with the given updated/added column.\n return self.__class__(data, meta, units)\n\n def remove_column(self, colname):\n \"\"\"\n Remove a column.\n\n Parameters\n ----------\n colname : str\n The heading of the column to remove.\n\n Returns\n -------\n `sunpy.timeseries.TimeSeries`\n A new `~sunpy.timeseries.TimeSeries`.\n \"\"\"\n if colname not in self.columns:\n raise ValueError(f'Given column name ({colname}) not in list of columns {self.columns}')\n data = self.data.drop(colname, 'columns')\n units = self.units.copy()\n units.pop(colname)\n return self.__class__(data, self.meta, units)\n\n def sort_index(self, **kwargs):\n \"\"\"\n Returns a sorted version of a `~sunpy.timeseries.TimeSeries`. Generally\n this shouldn't be necessary as most `~sunpy.timeseries.TimeSeries`\n operations sort the data anyway to ensure consistent behavior when\n truncating.\n\n Returns\n -------\n `~sunpy.timeseries.TimeSeries`\n A new `~sunpy.timeseries.TimeSeries` in ascending chronological order.\n \"\"\"\n return GenericTimeSeries(self.data.sort_index(**kwargs),\n TimeSeriesMetaData(copy.copy(self.meta.metadata)),\n copy.copy(self.units))\n\n def truncate(self, a, b=None, int=None):\n \"\"\"\n Returns a truncated version of the TimeSeries object.\n\n Parameters\n ----------\n a : `sunpy.time.TimeRange`, `str`, `int`\n Either a time range to truncate to, or a start time in some format recognized by pandas, or a index integer.\n b : `str` or `int`, optional\n If specified, the end time of the time range in some format recognized by pandas, or a index integer.\n Defaults to `None`.\n int : `int`, optional\n If specified, the integer indicating the slicing intervals.\n Defaults to `None`.\n\n Returns\n -------\n `~sunpy.timeseries.TimeSeries`\n A new `~sunpy.timeseries.TimeSeries` with only the selected times.\n \"\"\"\n # Evaluate inputs\n # If given strings, then use to create a sunpy.time.timerange.TimeRange\n # for the SunPy text date parser.\n if isinstance(a, str) and isinstance(b, str):\n a = TimeRange(a, b)\n if isinstance(a, TimeRange):\n # If we have a TimeRange, extract the values\n start = a.start.datetime\n end = a.end.datetime\n else:\n # Otherwise we already have the values\n start = a\n end = b\n\n # If an interval integer was given then use in truncation.\n truncated_data = self.data.sort_index()[start:end:int]\n\n # Truncate the metadata\n # Check there is data still\n truncated_meta = TimeSeriesMetaData([])\n if len(truncated_data) > 0:\n tr = TimeRange(truncated_data.index.min(), truncated_data.index.max())\n truncated_meta = TimeSeriesMetaData(copy.deepcopy(self.meta.metadata))\n truncated_meta._truncate(tr)\n\n # Build similar TimeSeries object and sanatise metadata and units.\n object = self.__class__(truncated_data.sort_index(), truncated_meta, copy.copy(self.units))\n object._sanitize_metadata()\n object._sanitize_units()\n return object\n\n def extract(self, column_name):\n \"\"\"\n Returns a new time series with the chosen column.\n\n Parameters\n ----------\n column_name : `str`\n A valid column name.\n\n Returns\n -------\n `~sunpy.timeseries.TimeSeries`\n A new `~sunpy.timeseries.TimeSeries` with only the selected column.\n \"\"\"\n # TODO: allow the extract function to pick more than one column\n # TODO: Fix 
this?\n # if isinstance(self, pandas.Series):\n # return self\n # else:\n # return GenericTimeSeries(self.data[column_name], TimeSeriesMetaData(self.meta.metadata.copy()))\n\n # Extract column and remove empty rows\n data = self.data[[column_name]].dropna()\n\n # Build generic TimeSeries object and sanatise metadata and units.\n object = GenericTimeSeries(data.sort_index(),\n TimeSeriesMetaData(copy.copy(self.meta.metadata)),\n copy.copy(self.units))\n object._sanitize_metadata()\n object._sanitize_units()\n return object\n\n def concatenate(self, otherts, same_source=False, **kwargs):\n \"\"\"\n Concatenate with another `~sunpy.timeseries.TimeSeries`. This function\n will check and remove any duplicate times. It will keep the column\n values from the original timeseries to which the new time series is\n being added.\n\n Parameters\n ----------\n otherts : `~sunpy.timeseries.TimeSeries`\n Another `~sunpy.timeseries.TimeSeries`.\n same_source : `bool`, optional\n Set to `True` to check if the sources of the time series match. Defaults to `False`.\n\n Returns\n -------\n `~sunpy.timeseries.TimeSeries`\n A new `~sunpy.timeseries.TimeSeries`.\n\n Notes\n -----\n Extra keywords are passed to `pandas.concat`.\n \"\"\"\n # TODO: decide if we want to be able to concatenate multiple time series at once.\n # check to see if nothing needs to be done\n if self == otherts:\n return self\n\n # Check the sources match if specified.\n if same_source and not (isinstance(otherts, self.__class__)):\n raise TypeError(\"TimeSeries classes must match if specified.\")\n\n # Concatenate the metadata and data\n kwargs['sort'] = kwargs.pop('sort', False)\n meta = self.meta.concatenate(otherts.meta)\n data = pd.concat([self.data.copy(), otherts.data], **kwargs)\n\n # Add all the new units to the dictionary.\n units = OrderedDict()\n units.update(self.units)\n units.update(otherts.units)\n\n # If sources match then build similar TimeSeries.\n if self.__class__ == otherts.__class__:\n object = self.__class__(data.sort_index(), meta, units)\n else:\n # Build generic time series if the sources don't match.\n object = GenericTimeSeries(data.sort_index(), meta, units)\n\n # Sanatise metadata and units\n object._sanitize_metadata()\n object._sanitize_units()\n return object\n\n# #### Plotting Methods #### #\n\n def plot(self, axes=None, **plot_args):\n \"\"\"\n Plot a plot of the `~sunpy.timeseries.TimeSeries`.\n\n Parameters\n ----------\n axes : `~matplotlib.axes.Axes`, optional\n If provided the image will be plotted on the given axes.\n Defaults to `None`, so the current axes will be used.\n **plot_args : `dict`, optional\n Any additional plot arguments that should be used when plotting.\n\n Returns\n -------\n axes : `~matplotlib.axes.Axes`\n The plot axes.\n \"\"\"\n # Get current axes\n if axes is None:\n axes = plt.gca()\n\n axes = self.data.plot(ax=axes, **plot_args)\n\n return axes\n\n @peek_show\n def peek(self, **kwargs):\n \"\"\"\n Displays a graphical overview of the data in this object for user evaluation.\n For the creation of plots, users should instead use the\n `~sunpy.timeseries.GenericTimeSeries.plot` method and Matplotlib's pyplot framework.\n\n Parameters\n ----------\n **kwargs : `dict`\n Any additional plot arguments that should be used when plotting.\n \"\"\"\n # Check we have a timeseries valid for plotting\n self._validate_data_for_ploting()\n\n # Now make the plot\n figure = plt.figure()\n self.plot(**kwargs)\n\n return figure\n\n def _validate_data_for_ploting(self):\n \"\"\"\n Raises an 
exception if the `~sunpy.timeseries.TimeSeries` is invalid\n for plotting.\n\n This should be added into all `~sunpy.timeseries.TimeSeries`\n peek methods.\n \"\"\"\n # Check we have a valid TS\n if len(self.data) == 0:\n raise ValueError(\"The timeseries can't be plotted as it has no data present. \"\n \"(len(self.data) == 0)\")\n\n# #### Miscellaneous #### #\n\n def _validate_meta(self):\n \"\"\"\n Validates the meta-information associated with a\n `~sunpy.timeseries.TimeSeries`.\n\n This method includes very basic validation checks which apply to\n all of the kinds of files that SunPy can read. Datasource-\n specific validation should be handled in the relevant file in\n the \"sunpy.timeseries.sources\".\n \"\"\"\n warnings.simplefilter('always', Warning)\n\n for meta_property in ('cunit1', 'cunit2', 'waveunit'):\n if (self.meta.get(meta_property) and\n u.Unit(self.meta.get(meta_property),\n parse_strict='silent').physical_type == 'unknown'):\n\n warnings.warn(f\"Unknown value for {meta_property.upper()}.\", SunpyUserWarning)\n\n def _validate_units(self, units, **kwargs):\n \"\"\"\n Validates the astropy unit-information associated with a\n `~sunpy.timeseries.TimeSeries`.\n\n This method includes very basic validation checks which apply to\n all of the kinds of files that SunPy can read. Datasource-\n specific validation should be handled in the relevant file in\n the \"sunpy.timeseries.sources\".\n \"\"\"\n warnings.simplefilter('always', Warning)\n\n result = True\n for key in units:\n if not isinstance(units[key], astropy.units.UnitBase):\n # If this is not a unit then this can't be a valid units dict.\n result = False\n warnings.warn(f\"Invalid unit given for {key}.\", SunpyUserWarning)\n\n return result\n\n def _sanitize_units(self, **kwargs):\n \"\"\"\n Sanitizes the `collections.OrderedDict` used to store the units.\n\n Primarily this method will:\n\n * Remove entries that don't match up to a column.\n * Add unitless entries for columns with no units defined.\n * Re-arrange the order of the dictionary to match the columns.\n \"\"\"\n warnings.simplefilter('always', Warning)\n\n # Populate unspecified units:\n for column in set(self.data.columns.tolist()) - set(self.units.keys()):\n # For all columns not present in the units dictionary.\n self.units[column] = u.dimensionless_unscaled\n warnings.warn(f\"Unknown units for {column}.\", SunpyUserWarning)\n\n # Re-arrange so it's in the same order as the columns and removed unused.\n units = OrderedDict()\n for column in self.data.columns.tolist():\n units.update({column: self.units[column]})\n\n # Now use the amended units Ordered Dictionary\n self.units = units\n\n def _sanitize_metadata(self, **kwargs):\n \"\"\"\n Sanitizes the `~sunpy.timeseries.TimeSeriesMetaData` used to store the\n metadata.\n\n Primarily this method will:\n\n * Remove entries outside of the dates or truncate if the metadata overflows past the data.\n * Remove column references in the metadata that don't match to a column in the data.\n * Remove metadata entries that have no columns matching the data.\n \"\"\"\n warnings.simplefilter('always', Warning)\n\n # Truncate the metadata\n self.meta._truncate(self.time_range)\n\n # Remove non-existant columns\n redundant_cols = list(set(self.meta.columns) - set(self.columns))\n self.meta._remove_columns(redundant_cols)\n\n# #### Export/Output Methods #### #\n\n def to_table(self, **kwargs):\n \"\"\"\n Return an `astropy.table.Table` of the given\n `~sunpy.timeseries.TimeSeries`.\n\n Returns\n -------\n 
`~astropy.table.Table`\n A new `astropy.table.Table` containing the data from the `~sunpy.timeseries.TimeSeries`.\n The table will include units where relevant.\n \"\"\"\n # TODO: Table.from_pandas(df) doesn't include the index column. Add request?\n # Get data columns\n table = Table.from_pandas(self.data)\n\n # Get index column and add to table.\n index_col = Column(self.data.index.values, name='date')\n table.add_column(index_col, index=0)\n\n # Add in units.\n for key in self.units:\n table[key].unit = self.units[key]\n\n # Output the table\n return table\n\n def to_dataframe(self, **kwargs):\n \"\"\"\n Return a `~pandas.core.frame.DataFrame` of the given\n `~sunpy.timeseries.TimeSeries`.\n\n Returns\n -------\n `~pandas.core.frame.DataFrame`\n A `~pandas.core.frame.DataFrame` containing the data.\n \"\"\"\n return self.data\n\n def to_array(self, **kwargs):\n \"\"\"\n Return a `numpy.array` of the given `~sunpy.timeseries.TimeSeries`.\n\n Parameters\n ----------\n kwargs : `dict`\n All keyword arguments are passed to `pandas.DataFrame.to_numpy`.\n\n Returns\n -------\n `~numpy.ndarray`\n If the data is heterogeneous and contains booleans or objects, the result will be of ``dtype=object``.\n \"\"\"\n if hasattr(self.data, \"to_numpy\"):\n return self.data.to_numpy(**kwargs)\n else:\n return self.data.values\n\n def __eq__(self, other):\n \"\"\"\n Check two `~sunpy.timeseries.TimeSeries` are the same, they have\n matching type, data, metadata and units entries.\n\n Parameters\n ----------\n other : `~sunpy.timeseries.TimeSeries`\n The second `~sunpy.timeseries.TimeSeries` to compare with.\n\n Returns\n -------\n `bool`\n \"\"\"\n match = True\n if isinstance(other, type(self)):\n if ((not self.data.equals(other.data)) or\n (self.meta != other.meta) or\n (self.units != other.units)):\n match = False\n else:\n match = False\n return match\n\n def __ne__(self, other):\n \"\"\"\n Check two `~sunpy.timeseries.TimeSeries` are not the same, they don't\n have matching type, data, metadata and/or units entries.\n\n Parameters\n ----------\n other : `~sunpy.timeseries.TimeSeries`\n The second `~sunpy.timeseries.TimeSeries` to compare with.\n\n Returns\n -------\n `bool`\n \"\"\"\n return not self == other\n\n @classmethod\n def _parse_file(cls, filepath):\n \"\"\"\n Parses a file - to be implemented in any subclass that may use files.\n\n Parameters\n ----------\n filepath : `str`\n The path to the file you want to parse.\n \"\"\"\n return NotImplemented\n",
"path": "sunpy/timeseries/timeseriesbase.py"
}
] | [
{
"content": "\"\"\"\nThis module provies `sunpy.timeseries.GenericTimeSeries` which all other\n`sunpy.timeseries.TimeSeries` classes inherit from.\n\"\"\"\nimport copy\nimport warnings\nfrom collections import OrderedDict\n\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\nimport astropy\nimport astropy.units as u\nfrom astropy.table import Column, Table\n\nfrom sunpy import config\nfrom sunpy.time import TimeRange\nfrom sunpy.timeseries import TimeSeriesMetaData\nfrom sunpy.util.exceptions import SunpyUserWarning\nfrom sunpy.util.metadata import MetaDict\nfrom sunpy.visualization import peek_show\n\n# define and register a new unit, needed for RHESSI\ndet = u.def_unit('detector')\nu.add_enabled_units([det])\n\nTIME_FORMAT = config.get(\"general\", \"time_format\")\n\n__all__ = [\"GenericTimeSeries\"]\n\n\nclass GenericTimeSeries:\n \"\"\"\n A generic time series object.\n\n Parameters\n ----------\n data : `~pandas.DataFrame`\n A `pandas.DataFrame` representing one or more fields as a function of time.\n meta : `~sunpy.timeseries.metadata.TimeSeriesMetaData`, optional\n The metadata giving details about the time series data/instrument.\n Defaults to `None`.\n units : `dict`, optional\n A mapping from column names in ``data`` to the physical units of that column.\n Defaults to `None`.\n\n Attributes\n ----------\n data : `~pandas.DataFrame`\n A `pandas.DataFrame` representing one or more fields as a function of time.\n meta : `~sunpy.timeseries.metadata.TimeSeriesMetaData`\n The metadata giving details about the time series data/instrument.\n units : `dict`\n A mapping from column names in ``data`` to the physical units ofthat column.\n\n Examples\n --------\n >>> from sunpy.timeseries import TimeSeries\n >>> from sunpy.time import parse_time\n >>> from astropy.time import TimeDelta\n >>> import numpy as np\n >>> import pandas as pd\n >>> times = parse_time(\"now\") - TimeDelta(np.arange(24 * 60)*u.minute)\n >>> intensity = np.sin(np.arange(0, 12 * np.pi, step=(12 * np.pi) / (24 * 60)))\n >>> df = pd.DataFrame(intensity, index=times, columns=['intensity'])\n >>> ts = TimeSeries(df)\n >>> ts.peek() # doctest: +SKIP\n\n References\n ----------\n * `Pandas Documentation <https://pandas.pydata.org/pandas-docs/stable/>`_\n \"\"\"\n # Class attribute used to specify the source class of the TimeSeries.\n _source = None\n _registry = dict()\n\n def __init_subclass__(cls, **kwargs):\n \"\"\"\n An __init_subclass__ hook initializes all of the subclasses of a given\n class.\n\n So for each subclass, it will call this block of code on import.\n This replicates some metaclass magic without the need to be\n aware of metaclasses. Here we use this to register each subclass\n in a dict that has the `is_datasource_for` attribute. 
This is\n then passed into the TimeSeries Factory so we can register them.\n \"\"\"\n super().__init_subclass__(**kwargs)\n if hasattr(cls, 'is_datasource_for'):\n cls._registry[cls] = cls.is_datasource_for\n\n # kwargs are not used here but are passed in for sources.\n def __init__(self, data, meta=None, units=None, **kwargs):\n self.data = data\n tr = self.time_range\n # Check metadata input\n if meta is None:\n # No meta given, so default\n self.meta = TimeSeriesMetaData(MetaDict(), tr, list(self.data.columns.values))\n elif isinstance(meta, (dict, OrderedDict, MetaDict)):\n # Given the values for metadata (dict) and infer timerange and colnames from the data\n self.meta = TimeSeriesMetaData(meta, tr, list(self.data.columns.values))\n elif isinstance(meta, tuple):\n # Given the values all in a tuple\n self.meta = TimeSeriesMetaData(meta, tr, list(self.data.columns.values))\n else:\n # Should have a list of 3-tuples giving a complex metadata list.\n self.meta = meta\n\n if units is None:\n self.units = {}\n else:\n self.units = units\n\n # TODO: Fix this?\n # Validate input data\n # self._validate_meta()\n # self._validate_units()\n\n# #### Attribute definitions #### #\n\n @property\n def source(self):\n \"\"\"\n A string/object used to specify the source class of the TimeSeries.\n \"\"\"\n return self._source\n\n @property\n def columns(self):\n \"\"\"\n A list of all the names of the columns in the data.\n \"\"\"\n return list(self.data.columns.values)\n\n @property\n def index(self):\n \"\"\"\n The time index of the data.\n \"\"\"\n return self.data.index\n\n @property\n def shape(self):\n \"\"\"\n The shape of the data, a tuple (nrows, ncols).\n \"\"\"\n return self.data.shape\n\n @property\n def time_range(self):\n \"\"\"\n The start and end times of the TimeSeries as a `~sunpy.time.TimeRange`.\n \"\"\"\n if len(self.data) > 0:\n return TimeRange(self.data.index.min(), self.data.index.max())\n else:\n return None\n\n# #### Data Access, Selection and Organisation Methods #### #\n\n def quantity(self, colname, **kwargs):\n \"\"\"\n Return a `~astropy.units.quantity.Quantity` for the given column.\n\n Parameters\n ----------\n colname : `str`\n The heading of the column you want to output.\n\n Returns\n -------\n `~astropy.units.quantity.Quantity`\n \"\"\"\n values = self.data[colname].values\n unit = self.units[colname]\n return u.Quantity(values, unit)\n\n def add_column(self, colname, quantity, unit=False, overwrite=True, **kwargs):\n \"\"\"\n Return a new `~sunpy.timeseries.TimeSeries` with the given column added\n or updated.\n\n Parameters\n ----------\n colname : `str`\n The heading of the column you want output.\n quantity : `~astropy.units.quantity.Quantity` or `~numpy.ndarray`\n The values to be placed within the column.\n If updating values only then a numpy array is permitted.\n overwrite : `bool`, optional\n Defaults to `True`, allowing the method to overwrite a column already present in the `~sunpy.timeseries.TimeSeries`.\n\n Returns\n -------\n `sunpy.timeseries.TimeSeries`\n A new `~sunpy.timeseries.TimeSeries`.\n \"\"\"\n # Get the expected units from the quantity if required\n if not unit and isinstance(quantity, astropy.units.quantity.Quantity):\n unit = quantity.unit\n elif not unit:\n unit = u.dimensionless_unscaled\n\n # Make a copy of all the TimeSeries components.\n data = copy.copy(self.data)\n meta = TimeSeriesMetaData(copy.copy(self.meta.metadata))\n units = copy.copy(self.units)\n\n # Add the unit to the units dictionary if already there.\n if not (colname 
in self.data.columns):\n units[colname] = unit\n\n # Convert the given quantity into values for given units if necessary.\n values = quantity\n if isinstance(values, astropy.units.quantity.Quantity) and overwrite:\n values = values.to(units[colname]).value\n\n # Update or add the data.\n if not (colname in self.data.columns) or overwrite:\n data[colname] = values\n\n # Return a new TimeSeries with the given updated/added column.\n return self.__class__(data, meta, units)\n\n def sort_index(self, **kwargs):\n \"\"\"\n Returns a sorted version of a `~sunpy.timeseries.TimeSeries`. Generally\n this shouldn't be necessary as most `~sunpy.timeseries.TimeSeries`\n operations sort the data anyway to ensure consistent behavior when\n truncating.\n\n Returns\n -------\n `~sunpy.timeseries.TimeSeries`\n A new `~sunpy.timeseries.TimeSeries` in ascending chronological order.\n \"\"\"\n return GenericTimeSeries(self.data.sort_index(**kwargs),\n TimeSeriesMetaData(copy.copy(self.meta.metadata)),\n copy.copy(self.units))\n\n def truncate(self, a, b=None, int=None):\n \"\"\"\n Returns a truncated version of the TimeSeries object.\n\n Parameters\n ----------\n a : `sunpy.time.TimeRange`, `str`, `int`\n Either a time range to truncate to, or a start time in some format recognized by pandas, or a index integer.\n b : `str` or `int`, optional\n If specified, the end time of the time range in some format recognized by pandas, or a index integer.\n Defaults to `None`.\n int : `int`, optional\n If specified, the integer indicating the slicing intervals.\n Defaults to `None`.\n\n Returns\n -------\n `~sunpy.timeseries.TimeSeries`\n A new `~sunpy.timeseries.TimeSeries` with only the selected times.\n \"\"\"\n # Evaluate inputs\n # If given strings, then use to create a sunpy.time.timerange.TimeRange\n # for the SunPy text date parser.\n if isinstance(a, str) and isinstance(b, str):\n a = TimeRange(a, b)\n if isinstance(a, TimeRange):\n # If we have a TimeRange, extract the values\n start = a.start.datetime\n end = a.end.datetime\n else:\n # Otherwise we already have the values\n start = a\n end = b\n\n # If an interval integer was given then use in truncation.\n truncated_data = self.data.sort_index()[start:end:int]\n\n # Truncate the metadata\n # Check there is data still\n truncated_meta = TimeSeriesMetaData([])\n if len(truncated_data) > 0:\n tr = TimeRange(truncated_data.index.min(), truncated_data.index.max())\n truncated_meta = TimeSeriesMetaData(copy.deepcopy(self.meta.metadata))\n truncated_meta._truncate(tr)\n\n # Build similar TimeSeries object and sanatise metadata and units.\n object = self.__class__(truncated_data.sort_index(), truncated_meta, copy.copy(self.units))\n object._sanitize_metadata()\n object._sanitize_units()\n return object\n\n def extract(self, column_name):\n \"\"\"\n Returns a new time series with the chosen column.\n\n Parameters\n ----------\n column_name : `str`\n A valid column name.\n\n Returns\n -------\n `~sunpy.timeseries.TimeSeries`\n A new `~sunpy.timeseries.TimeSeries` with only the selected column.\n \"\"\"\n # TODO: allow the extract function to pick more than one column\n # TODO: Fix this?\n # if isinstance(self, pandas.Series):\n # return self\n # else:\n # return GenericTimeSeries(self.data[column_name], TimeSeriesMetaData(self.meta.metadata.copy()))\n\n # Extract column and remove empty rows\n data = self.data[[column_name]].dropna()\n\n # Build generic TimeSeries object and sanatise metadata and units.\n object = GenericTimeSeries(data.sort_index(),\n 
TimeSeriesMetaData(copy.copy(self.meta.metadata)),\n copy.copy(self.units))\n object._sanitize_metadata()\n object._sanitize_units()\n return object\n\n def concatenate(self, otherts, same_source=False, **kwargs):\n \"\"\"\n Concatenate with another `~sunpy.timeseries.TimeSeries`. This function\n will check and remove any duplicate times. It will keep the column\n values from the original timeseries to which the new time series is\n being added.\n\n Parameters\n ----------\n otherts : `~sunpy.timeseries.TimeSeries`\n Another `~sunpy.timeseries.TimeSeries`.\n same_source : `bool`, optional\n Set to `True` to check if the sources of the time series match. Defaults to `False`.\n\n Returns\n -------\n `~sunpy.timeseries.TimeSeries`\n A new `~sunpy.timeseries.TimeSeries`.\n\n Notes\n -----\n Extra keywords are passed to `pandas.concat`.\n \"\"\"\n # TODO: decide if we want to be able to concatenate multiple time series at once.\n # check to see if nothing needs to be done\n if self == otherts:\n return self\n\n # Check the sources match if specified.\n if same_source and not (isinstance(otherts, self.__class__)):\n raise TypeError(\"TimeSeries classes must match if specified.\")\n\n # Concatenate the metadata and data\n kwargs['sort'] = kwargs.pop('sort', False)\n meta = self.meta.concatenate(otherts.meta)\n data = pd.concat([self.data.copy(), otherts.data], **kwargs)\n\n # Add all the new units to the dictionary.\n units = OrderedDict()\n units.update(self.units)\n units.update(otherts.units)\n\n # If sources match then build similar TimeSeries.\n if self.__class__ == otherts.__class__:\n object = self.__class__(data.sort_index(), meta, units)\n else:\n # Build generic time series if the sources don't match.\n object = GenericTimeSeries(data.sort_index(), meta, units)\n\n # Sanatise metadata and units\n object._sanitize_metadata()\n object._sanitize_units()\n return object\n\n# #### Plotting Methods #### #\n\n def plot(self, axes=None, **plot_args):\n \"\"\"\n Plot a plot of the `~sunpy.timeseries.TimeSeries`.\n\n Parameters\n ----------\n axes : `~matplotlib.axes.Axes`, optional\n If provided the image will be plotted on the given axes.\n Defaults to `None`, so the current axes will be used.\n **plot_args : `dict`, optional\n Any additional plot arguments that should be used when plotting.\n\n Returns\n -------\n axes : `~matplotlib.axes.Axes`\n The plot axes.\n \"\"\"\n # Get current axes\n if axes is None:\n axes = plt.gca()\n\n axes = self.data.plot(ax=axes, **plot_args)\n\n return axes\n\n @peek_show\n def peek(self, **kwargs):\n \"\"\"\n Displays a graphical overview of the data in this object for user evaluation.\n For the creation of plots, users should instead use the\n `~sunpy.timeseries.GenericTimeSeries.plot` method and Matplotlib's pyplot framework.\n\n Parameters\n ----------\n **kwargs : `dict`\n Any additional plot arguments that should be used when plotting.\n \"\"\"\n # Check we have a timeseries valid for plotting\n self._validate_data_for_ploting()\n\n # Now make the plot\n figure = plt.figure()\n self.plot(**kwargs)\n\n return figure\n\n def _validate_data_for_ploting(self):\n \"\"\"\n Raises an exception if the `~sunpy.timeseries.TimeSeries` is invalid\n for plotting.\n\n This should be added into all `~sunpy.timeseries.TimeSeries`\n peek methods.\n \"\"\"\n # Check we have a valid TS\n if len(self.data) == 0:\n raise ValueError(\"The timeseries can't be plotted as it has no data present. 
\"\n \"(len(self.data) == 0)\")\n\n# #### Miscellaneous #### #\n\n def _validate_meta(self):\n \"\"\"\n Validates the meta-information associated with a\n `~sunpy.timeseries.TimeSeries`.\n\n This method includes very basic validation checks which apply to\n all of the kinds of files that SunPy can read. Datasource-\n specific validation should be handled in the relevant file in\n the \"sunpy.timeseries.sources\".\n \"\"\"\n warnings.simplefilter('always', Warning)\n\n for meta_property in ('cunit1', 'cunit2', 'waveunit'):\n if (self.meta.get(meta_property) and\n u.Unit(self.meta.get(meta_property),\n parse_strict='silent').physical_type == 'unknown'):\n\n warnings.warn(f\"Unknown value for {meta_property.upper()}.\", SunpyUserWarning)\n\n def _validate_units(self, units, **kwargs):\n \"\"\"\n Validates the astropy unit-information associated with a\n `~sunpy.timeseries.TimeSeries`.\n\n This method includes very basic validation checks which apply to\n all of the kinds of files that SunPy can read. Datasource-\n specific validation should be handled in the relevant file in\n the \"sunpy.timeseries.sources\".\n \"\"\"\n warnings.simplefilter('always', Warning)\n\n result = True\n for key in units:\n if not isinstance(units[key], astropy.units.UnitBase):\n # If this is not a unit then this can't be a valid units dict.\n result = False\n warnings.warn(f\"Invalid unit given for {key}.\", SunpyUserWarning)\n\n return result\n\n def _sanitize_units(self, **kwargs):\n \"\"\"\n Sanitizes the `collections.OrderedDict` used to store the units.\n\n Primarily this method will:\n\n * Remove entries that don't match up to a column.\n * Add unitless entries for columns with no units defined.\n * Re-arrange the order of the dictionary to match the columns.\n \"\"\"\n warnings.simplefilter('always', Warning)\n\n # Populate unspecified units:\n for column in set(self.data.columns.tolist()) - set(self.units.keys()):\n # For all columns not present in the units dictionary.\n self.units[column] = u.dimensionless_unscaled\n warnings.warn(f\"Unknown units for {column}.\", SunpyUserWarning)\n\n # Re-arrange so it's in the same order as the columns and removed unused.\n units = OrderedDict()\n for column in self.data.columns.tolist():\n units.update({column: self.units[column]})\n\n # Now use the amended units Ordered Dictionary\n self.units = units\n\n def _sanitize_metadata(self, **kwargs):\n \"\"\"\n Sanitizes the `~sunpy.timeseries.TimeSeriesMetaData` used to store the\n metadata.\n\n Primarily this method will:\n\n * Remove entries outside of the dates or truncate if the metadata overflows past the data.\n * Remove column references in the metadata that don't match to a column in the data.\n * Remove metadata entries that have no columns matching the data.\n \"\"\"\n warnings.simplefilter('always', Warning)\n\n # Truncate the metadata\n self.meta._truncate(self.time_range)\n\n # Remove non-existant columns\n redundant_cols = list(set(self.meta.columns) - set(self.columns))\n self.meta._remove_columns(redundant_cols)\n\n# #### Export/Output Methods #### #\n\n def to_table(self, **kwargs):\n \"\"\"\n Return an `astropy.table.Table` of the given\n `~sunpy.timeseries.TimeSeries`.\n\n Returns\n -------\n `~astropy.table.Table`\n A new `astropy.table.Table` containing the data from the `~sunpy.timeseries.TimeSeries`.\n The table will include units where relevant.\n \"\"\"\n # TODO: Table.from_pandas(df) doesn't include the index column. 
Add request?\n # Get data columns\n table = Table.from_pandas(self.data)\n\n # Get index column and add to table.\n index_col = Column(self.data.index.values, name='date')\n table.add_column(index_col, index=0)\n\n # Add in units.\n for key in self.units:\n table[key].unit = self.units[key]\n\n # Output the table\n return table\n\n def to_dataframe(self, **kwargs):\n \"\"\"\n Return a `~pandas.core.frame.DataFrame` of the given\n `~sunpy.timeseries.TimeSeries`.\n\n Returns\n -------\n `~pandas.core.frame.DataFrame`\n A `~pandas.core.frame.DataFrame` containing the data.\n \"\"\"\n return self.data\n\n def to_array(self, **kwargs):\n \"\"\"\n Return a `numpy.array` of the given `~sunpy.timeseries.TimeSeries`.\n\n Parameters\n ----------\n kwargs : `dict`\n All keyword arguments are passed to `pandas.DataFrame.to_numpy`.\n\n Returns\n -------\n `~numpy.ndarray`\n If the data is heterogeneous and contains booleans or objects, the result will be of ``dtype=object``.\n \"\"\"\n if hasattr(self.data, \"to_numpy\"):\n return self.data.to_numpy(**kwargs)\n else:\n return self.data.values\n\n def __eq__(self, other):\n \"\"\"\n Check two `~sunpy.timeseries.TimeSeries` are the same, they have\n matching type, data, metadata and units entries.\n\n Parameters\n ----------\n other : `~sunpy.timeseries.TimeSeries`\n The second `~sunpy.timeseries.TimeSeries` to compare with.\n\n Returns\n -------\n `bool`\n \"\"\"\n match = True\n if isinstance(other, type(self)):\n if ((not self.data.equals(other.data)) or\n (self.meta != other.meta) or\n (self.units != other.units)):\n match = False\n else:\n match = False\n return match\n\n def __ne__(self, other):\n \"\"\"\n Check two `~sunpy.timeseries.TimeSeries` are not the same, they don't\n have matching type, data, metadata and/or units entries.\n\n Parameters\n ----------\n other : `~sunpy.timeseries.TimeSeries`\n The second `~sunpy.timeseries.TimeSeries` to compare with.\n\n Returns\n -------\n `bool`\n \"\"\"\n return not self == other\n\n @classmethod\n def _parse_file(cls, filepath):\n \"\"\"\n Parses a file - to be implemented in any subclass that may use files.\n\n Parameters\n ----------\n filepath : `str`\n The path to the file you want to parse.\n \"\"\"\n return NotImplemented\n",
"path": "sunpy/timeseries/timeseriesbase.py"
}
] | diff --git a/changelog/3380.feature.rst b/changelog/3380.feature.rst
new file mode 100644
index 00000000000..fe04ed5eea6
--- /dev/null
+++ b/changelog/3380.feature.rst
@@ -0,0 +1 @@
+Add `shape` property to TimeSeries.
\ No newline at end of file
diff --git a/sunpy/timeseries/tests/test_timeseriesbase.py b/sunpy/timeseries/tests/test_timeseriesbase.py
index 4411c304d61..2306b3e85d2 100644
--- a/sunpy/timeseries/tests/test_timeseriesbase.py
+++ b/sunpy/timeseries/tests/test_timeseriesbase.py
@@ -771,6 +771,10 @@ def test_ts_index(generic_ts):
assert (generic_ts.index == generic_ts.data.index).all()
+def test_ts_shape(generic_ts):
+ assert generic_ts.shape == generic_ts.data.shape
+
+
def test_ts_sort_index(generic_ts):
assert generic_ts.sort_index().data.equals(generic_ts.data.sort_index())
diff --git a/sunpy/timeseries/timeseriesbase.py b/sunpy/timeseries/timeseriesbase.py
index de89d99024f..6886ab9f8b1 100644
--- a/sunpy/timeseries/timeseriesbase.py
+++ b/sunpy/timeseries/timeseriesbase.py
@@ -141,6 +141,13 @@ def index(self):
"""
return self.data.index
+ @property
+ def shape(self):
+ """
+ The shape of the data, a tuple (nrows, ncols).
+ """
+ return self.data.shape
+
@property
def time_range(self):
"""
|
kartoza__prj.app-346 | Display thumbnails in a modal window when we click on fullscreen
We can see a lot of GIFs in the QGIS changelog. These thumbnails are too small to see, so I have to click the button to view them fullscreen. For now, this redirects to the GIF URL, e.g. http://changelog.qgis.org/media/images/entries/53f72a9cf1bf32d73eb5174c37e54c60002b9707.gif
The user then has to use the browser's "Back" button to return to the changelog.
It would be better to implement a JavaScript modal window that shows the GIF while staying on the URL http://changelog.qgis.org/en/qgis/version/2.16.0/
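
On the Django side, such a fix mainly needs the new modal script registered in the project's asset pipeline; here is a minimal sketch mirroring the `PIPELINE_JS` convention from `django_project/core/settings/project.py` (the jQuery/Bootstrap modal logic itself lives in the `js/entry.js` file added by the patch below):

```python
# Sketch: register the modal script with django-pipeline so it gets
# bundled into js/project.js with the existing project scripts.
# (In the real settings, PIPELINE_JS is defined in contrib.py.)
PIPELINE_JS = {}
PIPELINE_JS['project'] = {
    'source_filenames': (
        'js/csrf-ajax.js',
        'js/changelog.js',
        'js/github-issue.js',
        'js/entry.js',  # opens thumbnails/GIFs in a Bootstrap modal
    ),
    'output_filename': 'js/project.js',
}
```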
| [
{
"content": "# coding=utf-8\n\n\"\"\"Project level settings.\n\nAdjust these values as needed but don't commit passwords etc. to any public\nrepository!\n\"\"\"\n\nimport os # noqa\nfrom django.utils.translation import ugettext_lazy as _\nfrom .utils import absolute_path\nfrom .contrib import * # noqa\n\n# Project apps\nINSTALLED_APPS += (\n 'base',\n 'changes',\n 'github_issue',\n 'vota',\n)\n\n# Due to profile page does not available,\n# this will redirect to home page after login\nLOGIN_REDIRECT_URL = '/'\n\n# How many versions to list in each project box\nPROJECT_VERSION_LIST_SIZE = 10\n\n# Set debug to false for production\nDEBUG = TEMPLATE_DEBUG = False\n\nSOUTH_TESTS_MIGRATE = False\n\n\n# Set languages which want to be translated\nLANGUAGES = (\n ('en', _('English')),\n ('af', _('Afrikaans')),\n ('id', _('Indonesian')),\n ('ko', _('Korean')),\n)\n\n# Set storage path for the translation files\nLOCALE_PATHS = (absolute_path('locale'),)\n\n\nMIDDLEWARE_CLASSES = (\n # For nav bar generation\n 'core.custom_middleware.NavContextMiddleware',\n) + MIDDLEWARE_CLASSES\n\n# Project specific javascript files to be pipelined\n# For third party libs like jquery should go in contrib.py\nPIPELINE_JS['project'] = {\n 'source_filenames': (\n 'js/csrf-ajax.js',\n 'js/changelog.js',\n 'js/github-issue.js'\n ),\n 'output_filename': 'js/project.js',\n}\n\n# Project specific css files to be pipelined\n# For third party libs like bootstrap should go in contrib.py\nPIPELINE_CSS['project'] = {\n 'source_filenames': (\n 'css/changelog.css',\n 'css/form.css',\n 'css/fonts.css'\n ),\n 'output_filename': 'css/project.css',\n 'extra_context': {\n 'media': 'screen, projection',\n },\n}\n",
"path": "django_project/core/settings/project.py"
}
] | [
{
"content": "# coding=utf-8\n\n\"\"\"Project level settings.\n\nAdjust these values as needed but don't commit passwords etc. to any public\nrepository!\n\"\"\"\n\nimport os # noqa\nfrom django.utils.translation import ugettext_lazy as _\nfrom .utils import absolute_path\nfrom .contrib import * # noqa\n\n# Project apps\nINSTALLED_APPS += (\n 'base',\n 'changes',\n 'github_issue',\n 'vota',\n)\n\n# Due to profile page does not available,\n# this will redirect to home page after login\nLOGIN_REDIRECT_URL = '/'\n\n# How many versions to list in each project box\nPROJECT_VERSION_LIST_SIZE = 10\n\n# Set debug to false for production\nDEBUG = TEMPLATE_DEBUG = False\n\nSOUTH_TESTS_MIGRATE = False\n\n\n# Set languages which want to be translated\nLANGUAGES = (\n ('en', _('English')),\n ('af', _('Afrikaans')),\n ('id', _('Indonesian')),\n ('ko', _('Korean')),\n)\n\n# Set storage path for the translation files\nLOCALE_PATHS = (absolute_path('locale'),)\n\n\nMIDDLEWARE_CLASSES = (\n # For nav bar generation\n 'core.custom_middleware.NavContextMiddleware',\n) + MIDDLEWARE_CLASSES\n\n# Project specific javascript files to be pipelined\n# For third party libs like jquery should go in contrib.py\nPIPELINE_JS['project'] = {\n 'source_filenames': (\n 'js/csrf-ajax.js',\n 'js/changelog.js',\n 'js/github-issue.js',\n 'js/entry.js',\n ),\n 'output_filename': 'js/project.js',\n}\n\n# Project specific css files to be pipelined\n# For third party libs like bootstrap should go in contrib.py\nPIPELINE_CSS['project'] = {\n 'source_filenames': (\n 'css/changelog.css',\n 'css/form.css',\n 'css/fonts.css'\n ),\n 'output_filename': 'css/project.css',\n 'extra_context': {\n 'media': 'screen, projection',\n },\n}\n",
"path": "django_project/core/settings/project.py"
}
] | diff --git a/django_project/changes/static/js/entry.js b/django_project/changes/static/js/entry.js
new file mode 100644
index 000000000..25ff41fb2
--- /dev/null
+++ b/django_project/changes/static/js/entry.js
@@ -0,0 +1,17 @@
+/**
+ * Created by Dimas Ciputra <[email protected]> on 15/07/16.
+ */
+
+$(".pop-image").on("click", function() {
+ $('#imagepreview').attr('src', $(this).children().attr('id'));
+ $('#image-url').attr('href', $(this).children().attr('id'));
+ $('#imagemodal').modal('show');
+ return false;
+});
+
+$(".pop-gif").on("click", function() {
+ $('#imagepreview').attr('src', $(this).siblings().attr('id'));
+ $('#image-url').attr('href', $(this).siblings().attr('id'));
+ $('#imagemodal').modal('show');
+ return false;
+});
\ No newline at end of file
diff --git a/django_project/changes/templates/entry/detail.html b/django_project/changes/templates/entry/detail.html
index 07ea89daa..3ce6b2a86 100644
--- a/django_project/changes/templates/entry/detail.html
+++ b/django_project/changes/templates/entry/detail.html
@@ -25,4 +25,6 @@ <h4 class="muted">Version:
<h5 id="comments">Comments</h5>
{% disqus_show_comments %}
+
{% endblock %}
+
diff --git a/django_project/changes/templates/entry/includes/entry_detail.html b/django_project/changes/templates/entry/includes/entry_detail.html
index 820d0be50..9faace702 100644
--- a/django_project/changes/templates/entry/includes/entry_detail.html
+++ b/django_project/changes/templates/entry/includes/entry_detail.html
@@ -36,15 +36,15 @@ <h3><span class="text-muted">Feature:</span> {{ entry.title }}</h3>
</div>
<div class="col-lg-4 text-center">
{% if entry.image_file|is_gif %}
- <img class="img-responsive img-rounded pull-right"
+ <img id="{{ MEDIA_URL }}{{ entry.image_file }}" class="img-responsive img-rounded pull-right"
data-gifffer="{{ MEDIA_URL }}{{ entry.image_file }}"
gifffer-alt=""/>{# see core/settings/contrib.py for large-entry #}
- <a href="{{ MEDIA_URL }}{{ entry.image_file }}">
- Click here for full size animation.
+ <a href="#" class="pop-gif">
+ Click here for bigger size animation.
</a>
{% else %}
- <a href="{{ MEDIA_URL }}{{ entry.image_file }}">
- <img class="img-responsive img-rounded pull-right"
+ <a href="#" class="pop-image">
+ <img id="{{ MEDIA_URL }}{{ entry.image_file }}" class="img-responsive img-rounded pull-right"
src="{{ entry.image_file|thumbnail_url:'large-entry' }}"
alt=""/>{# see core/settings/contrib.py for large-entry #}
</a>
diff --git a/django_project/core/base_static/css/changelog.css b/django_project/core/base_static/css/changelog.css
index 8a93b0fd7..2416c078f 100644
--- a/django_project/core/base_static/css/changelog.css
+++ b/django_project/core/base_static/css/changelog.css
@@ -129,4 +129,24 @@ ul.ui-sortable-disabled .order{
cursor: pointer;
}
+@media (min-width: 1200px) {
+ #imagemodal .modal-dialog {
+ width:1000px ;
+ text-align: center;
+ }
+ #imagepreview {
+ max-width: 900px;
+ }
+}
+
+@media (max-width: 1200px) {
+ #imagemodal .modal-dialog {
+ width:600px;
+ text-align: center;
+ }
+
+ #imagepreview {
+ max-width: 500px;
+ }
+}
diff --git a/django_project/core/base_templates/project_base.html b/django_project/core/base_templates/project_base.html
index b839de081..65d7fc214 100644
--- a/django_project/core/base_templates/project_base.html
+++ b/django_project/core/base_templates/project_base.html
@@ -111,6 +111,24 @@ <h4 class="modal-title">Report an issue</h4>
</p>
</nav>
</div>
+
+<!-- Creates the bootstrap modal where the thumbnaiul image will appear -->
+<div class="modal fade" id="imagemodal" tabindex="-1" role="dialog" aria-labelledby="myModalLabel" aria-hidden="true">
+ <div class="modal-dialog" >
+ <div class="modal-content" >
+ <div class="modal-body" >
+ <a href="#" id="image-url" target="_blank">
+ <img src="" id="imagepreview">
+ </a>
+ </div>
+ Click the image to open original size image in new tab
+ <div class="modal-footer">
+ <button type="button" class="btn btn-default" data-dismiss="modal">Close</button>
+ </div>
+ </div>
+ </div>
+</div>
+
{% compressed_js 'contrib' %}
{% compressed_js 'project' %}
{% block inline-js %}{% endblock %}
@@ -160,6 +178,7 @@ <h4 class="modal-title">Report an issue</h4>
});
</script>
+
{% if intercom_app_id and request.user %}
<script id="IntercomSettingsScriptTag">
window.intercomSettings = {
diff --git a/django_project/core/settings/project.py b/django_project/core/settings/project.py
index c3ade9912..439562c2f 100644
--- a/django_project/core/settings/project.py
+++ b/django_project/core/settings/project.py
@@ -55,7 +55,8 @@
'source_filenames': (
'js/csrf-ajax.js',
'js/changelog.js',
- 'js/github-issue.js'
+ 'js/github-issue.js',
+ 'js/entry.js',
),
'output_filename': 'js/project.js',
}
|
liqd__a4-opin-400 | Fix the function in api.js to use content type JSON and fix all React components
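
The gist of the requested change — send request bodies JSON-encoded with an explicit `Content-Type: application/json` header instead of form-encoded data — sketched below with Python's standard library for consistency with the rest of this dump (the endpoint URL is hypothetical; the real fix belongs in the repo's `api.js` and its React callers):

```python
import json
import urllib.request

# Hypothetical endpoint, used purely for illustration.
url = 'https://example.com/api/comments/'

# Serialize the payload as JSON and declare the content type explicitly,
# so the server parses the body as JSON rather than as form data.
payload = json.dumps({'comment': 'hello'}).encode('utf-8')
request = urllib.request.Request(
    url,
    data=payload,
    headers={'Content-Type': 'application/json'},
    method='POST',
)
# urllib.request.urlopen(request) would submit it.
```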
| [
{
"content": "\"\"\"\nDjango settings for euth_wagtail project.\n\nGenerated by 'django-admin startproject' using Django 1.9.1.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.9/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/1.9/ref/settings/\n\"\"\"\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nimport os\n\nfrom django.utils.translation import ugettext_lazy as _\n\nPROJECT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nBASE_DIR = os.path.dirname(PROJECT_DIR)\n\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/\n\n\n# Application definition\n\nINSTALLED_APPS = [\n 'home',\n\n 'wagtail.wagtailforms',\n 'wagtail.wagtailredirects',\n 'wagtail.wagtailembeds',\n 'wagtail.wagtailsites',\n 'wagtail.wagtailusers',\n 'wagtail.wagtailsnippets',\n 'wagtail.wagtaildocs',\n 'wagtail.wagtailimages',\n 'wagtail.wagtailsearch',\n 'wagtail.wagtailadmin',\n 'wagtail.wagtailcore',\n 'wagtail.contrib.wagtailstyleguide',\n\n 'modelcluster',\n 'compressor',\n 'taggit',\n 'widget_tweaks',\n 'webpack_loader',\n 'easy_thumbnails',\n 'parler',\n 'ckeditor',\n 'ckeditor_uploader',\n\n 'django.contrib.sites',\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'django_countries',\n 'rest_framework',\n 'autofixture',\n 'rules.apps.AutodiscoverRulesConfig',\n 'allauth',\n 'allauth.account',\n 'allauth.socialaccount',\n\n 'euth.users.apps.UsersConfig',\n 'euth.organisations.apps.OrganisationsConfig',\n 'euth.projects.apps.ProjectsConfig',\n 'euth.comments.apps.CommentConfig',\n 'euth.phases.apps.PhasesConfig',\n 'euth.modules.apps.ModuleConfig',\n 'euth.ideas.apps.IdeaConfig',\n 'euth.ratings.apps.RatingsConfig',\n 'euth.reports.apps.ReportConfig',\n 'euth.dashboard.apps.DashboardConfig',\n 'euth.memberships.apps.MembershipsConfig',\n 'euth.documents.apps.DocumentConfig',\n 'euth.flashpoll.apps.FlashpollConfig',\n 'euth.contrib',\n]\n\nMIDDLEWARE_CLASSES = [\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n 'django.middleware.security.SecurityMiddleware',\n 'django.middleware.locale.LocaleMiddleware',\n 'wagtail.wagtailcore.middleware.SiteMiddleware',\n 'wagtail.wagtailredirects.middleware.RedirectMiddleware',\n]\n\nSITE_ID = 1\n\nROOT_URLCONF = 'euth_wagtail.urls'\n\nLOCALE_PATHS = [os.path.join(BASE_DIR, 'locale')]\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [\n os.path.join(PROJECT_DIR, 'templates'),\n ],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n ],\n },\n },\n]\n\nWSGI_APPLICATION = 'euth_wagtail.wsgi.application'\n\n\n# Database\n# https://docs.djangoproject.com/en/1.9/ref/settings/#databases\n\nDATABASES = {\n 'default': {\n 'ENGINE': 
'django.db.backends.sqlite3',\n 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),\n 'TEST': {\n 'NAME': os.path.join(BASE_DIR, 'test_db.sqlite3'),\n }\n }\n}\n\n\n# Auth\n# https://docs.djangoproject.com/en/1.8/topics/auth/customizing/\n\nAUTH_USER_MODEL = 'euth_users.User'\n\nAUTHENTICATION_BACKENDS = (\n 'rules.permissions.ObjectPermissionBackend',\n 'django.contrib.auth.backends.ModelBackend',\n 'allauth.account.auth_backends.AuthenticationBackend',\n)\n\nCKEDITOR_UPLOAD_PATH = \"uploads/\"\nCKEDITOR_ALLOW_NONIMAGE_FILES = False\n\nCKEDITOR_CONFIGS = {\n 'default': {\n 'width': '100%',\n 'toolbar': 'Custom',\n 'toolbar_Custom': [\n ['Bold', 'Italic', 'Underline'],\n ['NumberedList', 'BulletedList'],\n ['Link', 'Unlink']\n ]\n },\n 'image-editor': {\n 'width': '100%',\n 'toolbar': 'Custom',\n 'toolbar_Custom': [\n ['Bold', 'Italic', 'Underline'],\n ['Image'],\n ['NumberedList', 'BulletedList'],\n ['Link', 'Unlink']\n ]\n }\n}\n\nBLEACH_LIST = {\n 'default' : {\n 'tags': ['p','strong','em','u','ol','li','ul','a'],\n 'attributes': {\n 'a': ['href', 'rel'],\n },\n },\n 'image-editor': {\n 'tags': ['p','strong','em','u','ol','li','ul','a','img'],\n 'attributes': {\n 'a': ['href', 'rel'],\n 'img': ['src', 'alt', 'style']\n },\n 'styles': [\n 'float',\n 'margin',\n 'padding',\n 'width',\n 'height',\n 'margin-bottom',\n 'margin-top',\n 'margin-left',\n 'margin-right',\n ],\n }\n}\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.9/topics/i18n/\n\nLANGUAGE_CODE = 'en'\n\nTIME_ZONE = 'UTC'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\nLANGUAGES = [\n ('en', _('English')),\n ('de', _('German')),\n ('it', _('Italien')),\n ('fr', _('French')),\n ('sv', _('Swedish')),\n ('sl', _('Slovene')),\n ('da', _('Danish')),\n]\n\nPARLER_LANGUAGES = {\n 1:[{'code': language_code } for language_code, language in LANGUAGES]\n}\n\n# fixtures\n\nFIXTURE_DIRS = [ os.path.join(PROJECT_DIR, 'fixtures') ]\n\nALLOWED_UPLOAD_IMAGES = ('image/png', 'image/jpeg', 'image/gif')\n\nTHUMBNAIL_ALIASES = {\n '': {\n 'heroimage': {'size': (1500, 500), 'crop': 'smart'},\n 'project_thumbnail': {'size': (520, 330), 'crop': 'smart'},\n 'idea_image': {'size': (800, 0), 'crop': 'scale'},\n 'organisation_thumbnail': {'size': (740, 540), 'crop': 'smart'},\n 'avatar_small': {'size': (60, 60), 'crop': 'smart'},\n 'org_avatar_small': {'size': (60, 60), 'crop': 'scale'},\n 'org_avatar_medium': {'size': (200, 200), 'crop': 'scale'},\n }\n}\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/1.9/howto/static-files/\n\nSTATICFILES_FINDERS = [\n 'django.contrib.staticfiles.finders.FileSystemFinder',\n 'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n 'compressor.finders.CompressorFinder',\n]\n\n\nWEBPACK_LOADER = {\n 'DEFAULT': {\n 'CACHE': False,\n 'BUNDLE_DIR_NAME': 'bundles/', # must end with slash\n 'STATS_FILE': os.path.join(BASE_DIR, 'webpack-stats.json'),\n 'POLL_INTERVAL': 0.1,\n 'IGNORE': ['.+\\.hot-update.js', '.+\\.map']\n }\n}\n\n\nSTATICFILES_DIRS = [\n os.path.join(BASE_DIR, 'node_modules/jquery/dist'),\n os.path.join(BASE_DIR, 'node_modules/salvattore/dist'),\n os.path.join(BASE_DIR, 'node_modules/bootstrap-sass/assets/javascripts'),\n os.path.join(BASE_DIR, 'node_modules/bootstrap-sass/assets/stylesheets'),\n os.path.join(BASE_DIR, 'node_modules/font-awesome'),\n os.path.join(BASE_DIR, 'node_modules/owl.carousel/dist'),\n os.path.join(BASE_DIR, 'node_modules/flatpickr/assets'),\n os.path.join(BASE_DIR, 'node_modules/flatpickr/dist'),\n os.path.join(PROJECT_DIR, 
'static'),\n]\n\nSTATIC_ROOT = os.path.join(BASE_DIR, 'static')\nSTATIC_URL = '/static/'\n\nMEDIA_ROOT = os.path.join(BASE_DIR, 'media')\nMEDIA_URL = '/media/'\n\nCOMPRESS_PRECOMPILERS = (\n ('text/x-scss', 'django_libsass.SassCompiler'),\n)\nLIBSASS_SOURCEMAPS = True\n\nEMAIL_SUBJECT_PREFIX = '[OPIN] '\n\n# Wagtail settings\n\nWAGTAIL_SITE_NAME = \"euth_wagtail\"\n\n# Authentification\n\nLOGIN_URL = 'account_login'\nLOGOUT_URL = 'account_logout'\nLOGIN_REDIRECT_URL = '/'\n\nACCOUNT_ADAPTER = 'euth.users.adapters.EuthAccountAdapter'\nACCOUNT_AUTHENTICATION_METHOD = 'email'\nACCOUNT_EMAIL_CONFIRMATION_EXPIRE_DAYS = 3\nACCOUNT_EMAIL_REQUIRED = True\nACCOUNT_EMAIL_SUBJECT_PREFIX = EMAIL_SUBJECT_PREFIX\nACCOUNT_EMAIL_VERIFICATION = 'mandatory'\nACCOUNT_SIGNUP_FORM_CLASS = 'euth.users.forms.SignUpForm'\nACCOUNT_USER_DISPLAY = 'euth.users.services.account_user_display'\nACCOUNT_USER_MODEL_USERNAME_FIELD = 'username'\nACCOUNT_USERNAME_REQUIRED = True\nACCOUNT_LOGIN_ATTEMPTS_LIMIT = 10\nACCOUNT_LOGIN_ATTEMPTS_TIMEOUT = 300 # seconds\nACCOUNT_LOGIN_ON_EMAIL_CONFIRMATION = True\nACCOUNT_LOGIN_ON_PASSWORD_RESET = True\nACCOUNT_LOGOUT_REDIRECT_URL = '/'\nSOCIALACCOUNT_EMAIL_VERIFICATION = False\n\n# Euth settings\n\nCOMMENTABLES = (\n ('euth_ideas', 'idea'),\n ('euth_documents', 'paragraph'),\n ('euth_documents', 'document'),\n ('euth_comments', 'comment'),\n)\n\nRATEABLES = COMMENTABLES\n\nREPORTABLES = COMMENTABLES\n\nFLASHPOLL_URL = \"https://opin.flashpoll.eu/\"\n",
"path": "euth_wagtail/settings/base.py"
}
] | [
{
"content": "\"\"\"\nDjango settings for euth_wagtail project.\n\nGenerated by 'django-admin startproject' using Django 1.9.1.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.9/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/1.9/ref/settings/\n\"\"\"\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nimport os\n\nfrom django.utils.translation import ugettext_lazy as _\n\nPROJECT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nBASE_DIR = os.path.dirname(PROJECT_DIR)\n\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/\n\n\n# Application definition\n\nINSTALLED_APPS = [\n 'home',\n\n 'wagtail.wagtailforms',\n 'wagtail.wagtailredirects',\n 'wagtail.wagtailembeds',\n 'wagtail.wagtailsites',\n 'wagtail.wagtailusers',\n 'wagtail.wagtailsnippets',\n 'wagtail.wagtaildocs',\n 'wagtail.wagtailimages',\n 'wagtail.wagtailsearch',\n 'wagtail.wagtailadmin',\n 'wagtail.wagtailcore',\n 'wagtail.contrib.wagtailstyleguide',\n\n 'modelcluster',\n 'compressor',\n 'taggit',\n 'widget_tweaks',\n 'webpack_loader',\n 'easy_thumbnails',\n 'parler',\n 'ckeditor',\n 'ckeditor_uploader',\n\n 'django.contrib.sites',\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'django_countries',\n 'rest_framework',\n 'autofixture',\n 'rules.apps.AutodiscoverRulesConfig',\n 'allauth',\n 'allauth.account',\n 'allauth.socialaccount',\n\n 'euth.users.apps.UsersConfig',\n 'euth.organisations.apps.OrganisationsConfig',\n 'euth.projects.apps.ProjectsConfig',\n 'euth.comments.apps.CommentConfig',\n 'euth.phases.apps.PhasesConfig',\n 'euth.modules.apps.ModuleConfig',\n 'euth.ideas.apps.IdeaConfig',\n 'euth.ratings.apps.RatingsConfig',\n 'euth.reports.apps.ReportConfig',\n 'euth.dashboard.apps.DashboardConfig',\n 'euth.memberships.apps.MembershipsConfig',\n 'euth.documents.apps.DocumentConfig',\n 'euth.flashpoll.apps.FlashpollConfig',\n 'euth.contrib',\n]\n\nMIDDLEWARE_CLASSES = [\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n 'django.middleware.security.SecurityMiddleware',\n 'django.middleware.locale.LocaleMiddleware',\n 'wagtail.wagtailcore.middleware.SiteMiddleware',\n 'wagtail.wagtailredirects.middleware.RedirectMiddleware',\n]\n\nSITE_ID = 1\n\nROOT_URLCONF = 'euth_wagtail.urls'\n\nLOCALE_PATHS = [os.path.join(BASE_DIR, 'locale')]\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [\n os.path.join(PROJECT_DIR, 'templates'),\n ],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n ],\n },\n },\n]\n\nWSGI_APPLICATION = 'euth_wagtail.wsgi.application'\n\n\n# Database\n# https://docs.djangoproject.com/en/1.9/ref/settings/#databases\n\nDATABASES = {\n 'default': {\n 'ENGINE': 
'django.db.backends.sqlite3',\n 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),\n 'TEST': {\n 'NAME': os.path.join(BASE_DIR, 'test_db.sqlite3'),\n }\n }\n}\n\n\n# Auth\n# https://docs.djangoproject.com/en/1.8/topics/auth/customizing/\n\nAUTH_USER_MODEL = 'euth_users.User'\n\nAUTHENTICATION_BACKENDS = (\n 'rules.permissions.ObjectPermissionBackend',\n 'django.contrib.auth.backends.ModelBackend',\n 'allauth.account.auth_backends.AuthenticationBackend',\n)\n\nCKEDITOR_UPLOAD_PATH = \"uploads/\"\nCKEDITOR_ALLOW_NONIMAGE_FILES = False\n\nCKEDITOR_CONFIGS = {\n 'default': {\n 'width': '100%',\n 'toolbar': 'Custom',\n 'toolbar_Custom': [\n ['Bold', 'Italic', 'Underline'],\n ['NumberedList', 'BulletedList'],\n ['Link', 'Unlink']\n ]\n },\n 'image-editor': {\n 'width': '100%',\n 'toolbar': 'Custom',\n 'toolbar_Custom': [\n ['Bold', 'Italic', 'Underline'],\n ['Image'],\n ['NumberedList', 'BulletedList'],\n ['Link', 'Unlink']\n ]\n }\n}\n\nBLEACH_LIST = {\n 'default' : {\n 'tags': ['p','strong','em','u','ol','li','ul','a'],\n 'attributes': {\n 'a': ['href', 'rel'],\n },\n },\n 'image-editor': {\n 'tags': ['p','strong','em','u','ol','li','ul','a','img'],\n 'attributes': {\n 'a': ['href', 'rel'],\n 'img': ['src', 'alt', 'style']\n },\n 'styles': [\n 'float',\n 'margin',\n 'padding',\n 'width',\n 'height',\n 'margin-bottom',\n 'margin-top',\n 'margin-left',\n 'margin-right',\n ],\n }\n}\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.9/topics/i18n/\n\nLANGUAGE_CODE = 'en'\n\nTIME_ZONE = 'UTC'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\nLANGUAGES = [\n ('en', _('English')),\n ('de', _('German')),\n ('it', _('Italien')),\n ('fr', _('French')),\n ('sv', _('Swedish')),\n ('sl', _('Slovene')),\n ('da', _('Danish')),\n]\n\nPARLER_LANGUAGES = {\n 1:[{'code': language_code } for language_code, language in LANGUAGES]\n}\n\n# fixtures\n\nFIXTURE_DIRS = [ os.path.join(PROJECT_DIR, 'fixtures') ]\n\nALLOWED_UPLOAD_IMAGES = ('image/png', 'image/jpeg', 'image/gif')\n\nTHUMBNAIL_ALIASES = {\n '': {\n 'heroimage': {'size': (1500, 500), 'crop': 'smart'},\n 'project_thumbnail': {'size': (520, 330), 'crop': 'smart'},\n 'idea_image': {'size': (800, 0), 'crop': 'scale'},\n 'organisation_thumbnail': {'size': (740, 540), 'crop': 'smart'},\n 'avatar_small': {'size': (60, 60), 'crop': 'smart'},\n 'org_avatar_small': {'size': (60, 60), 'crop': 'scale'},\n 'org_avatar_medium': {'size': (200, 200), 'crop': 'scale'},\n }\n}\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/1.9/howto/static-files/\n\nSTATICFILES_FINDERS = [\n 'django.contrib.staticfiles.finders.FileSystemFinder',\n 'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n 'compressor.finders.CompressorFinder',\n]\n\n\nWEBPACK_LOADER = {\n 'DEFAULT': {\n 'CACHE': False,\n 'BUNDLE_DIR_NAME': 'bundles/', # must end with slash\n 'STATS_FILE': os.path.join(BASE_DIR, 'webpack-stats.json'),\n 'POLL_INTERVAL': 0.1,\n 'IGNORE': ['.+\\.hot-update.js', '.+\\.map']\n }\n}\n\n\nSTATICFILES_DIRS = [\n os.path.join(BASE_DIR, 'node_modules/jquery/dist'),\n os.path.join(BASE_DIR, 'node_modules/salvattore/dist'),\n os.path.join(BASE_DIR, 'node_modules/bootstrap-sass/assets/javascripts'),\n os.path.join(BASE_DIR, 'node_modules/bootstrap-sass/assets/stylesheets'),\n os.path.join(BASE_DIR, 'node_modules/font-awesome'),\n os.path.join(BASE_DIR, 'node_modules/owl.carousel/dist'),\n os.path.join(BASE_DIR, 'node_modules/flatpickr/assets'),\n os.path.join(BASE_DIR, 'node_modules/flatpickr/dist'),\n os.path.join(PROJECT_DIR, 
'static'),\n]\n\nSTATIC_ROOT = os.path.join(BASE_DIR, 'static')\nSTATIC_URL = '/static/'\n\nMEDIA_ROOT = os.path.join(BASE_DIR, 'media')\nMEDIA_URL = '/media/'\n\nCOMPRESS_PRECOMPILERS = (\n ('text/x-scss', 'django_libsass.SassCompiler'),\n)\nLIBSASS_SOURCEMAPS = True\n\nEMAIL_SUBJECT_PREFIX = '[OPIN] '\n\n# Wagtail settings\n\nWAGTAIL_SITE_NAME = \"euth_wagtail\"\n\n# Authentification\n\nLOGIN_URL = 'account_login'\nLOGOUT_URL = 'account_logout'\nLOGIN_REDIRECT_URL = '/'\n\nACCOUNT_ADAPTER = 'euth.users.adapters.EuthAccountAdapter'\nACCOUNT_AUTHENTICATION_METHOD = 'email'\nACCOUNT_EMAIL_CONFIRMATION_EXPIRE_DAYS = 3\nACCOUNT_EMAIL_REQUIRED = True\nACCOUNT_EMAIL_SUBJECT_PREFIX = EMAIL_SUBJECT_PREFIX\nACCOUNT_EMAIL_VERIFICATION = 'mandatory'\nACCOUNT_SIGNUP_FORM_CLASS = 'euth.users.forms.SignUpForm'\nACCOUNT_USER_DISPLAY = 'euth.users.services.account_user_display'\nACCOUNT_USER_MODEL_USERNAME_FIELD = 'username'\nACCOUNT_USERNAME_REQUIRED = True\nACCOUNT_LOGIN_ATTEMPTS_LIMIT = 10\nACCOUNT_LOGIN_ATTEMPTS_TIMEOUT = 300 # seconds\nACCOUNT_LOGIN_ON_EMAIL_CONFIRMATION = True\nACCOUNT_LOGIN_ON_PASSWORD_RESET = True\nACCOUNT_LOGOUT_REDIRECT_URL = '/'\nSOCIALACCOUNT_EMAIL_VERIFICATION = False\n\n# Rest framework\n\nREST_FRAMEWORK = {\n 'DEFAULT_PARSER_CLASSES': (\n 'rest_framework.parsers.JSONParser',\n )\n}\n\n# Euth settings\n\nCOMMENTABLES = (\n ('euth_ideas', 'idea'),\n ('euth_documents', 'paragraph'),\n ('euth_documents', 'document'),\n ('euth_comments', 'comment'),\n)\n\nRATEABLES = COMMENTABLES\n\nREPORTABLES = COMMENTABLES\n\nFLASHPOLL_URL = \"https://opin.flashpoll.eu/\"\n",
"path": "euth_wagtail/settings/base.py"
}
] | diff --git a/euth/contrib/static/js/api.js b/euth/contrib/static/js/api.js
index 8171a3c50..b298097a2 100644
--- a/euth/contrib/static/js/api.js
+++ b/euth/contrib/static/js/api.js
@@ -18,33 +18,6 @@ var api = (function () {
}
function _sendRequest (endpoint, id, options, data, contentType) {
- var $body = $('body')
- var url = urls[endpoint]
- if (typeof id === 'object') {
- // there's no id, switch parameters
- data = options
- options = id
- } else if (typeof id === 'number') {
- url = url + id + '/'
- }
- var defaultParams = {
- url: url,
- dataType: 'json',
- data: data,
- error: function (xhr, status, err) {
- console.error(url, status, err.toString())
- },
- complete: function () {
- $body.removeClass('loading')
- }
- }
- var params = $.extend(defaultParams, options)
-
- $body.addClass('loading')
- return $.ajax(params)
- }
-
- function _sendJSONRequest (endpoint, id, options, data) {
var $body = $('body')
var url = urls[endpoint]
if (typeof id === 'object') {
@@ -58,7 +31,7 @@ var api = (function () {
url: url,
contentType: 'application/json; charset=utf-8',
dataType: 'json',
- data: data,
+ data: JSON.stringify(data),
error: function (xhr, status, err) {
console.error(url, status, err.toString())
},
@@ -120,12 +93,12 @@ var api = (function () {
},
document: {
add: function (data) {
- return _sendJSONRequest('document', {
+ return _sendRequest('document', {
type: 'POST'
}, data)
},
change: function (data, id) {
- return _sendJSONRequest('document', id, {
+ return _sendRequest('document', id, {
type: 'PUT'
}, data)
}
diff --git a/euth/documents/static/documents/ParagraphBox.js b/euth/documents/static/documents/ParagraphBox.js
index d5ccd749f..b0682d6bb 100644
--- a/euth/documents/static/documents/ParagraphBox.js
+++ b/euth/documents/static/documents/ParagraphBox.js
@@ -87,7 +87,7 @@ var ParagraphBox = React.createClass({
submitData['module'] = this.props.module
submitData['paragraphs'] = this.state.paragraphs
- api.document.change(JSON.stringify(submitData), id)
+ api.document.change(submitData, id)
.done(function (data) {
this.setState({
name: data.name,
@@ -107,7 +107,7 @@ var ParagraphBox = React.createClass({
submitData['module'] = this.props.module
submitData['paragraphs'] = this.state.paragraphs
- api.document.add(JSON.stringify(submitData))
+ api.document.add(submitData)
.done(function (data) {
this.setState({
name: data.name,
diff --git a/euth_wagtail/settings/base.py b/euth_wagtail/settings/base.py
index 0d688ec4f..633512694 100644
--- a/euth_wagtail/settings/base.py
+++ b/euth_wagtail/settings/base.py
@@ -317,6 +317,14 @@
ACCOUNT_LOGOUT_REDIRECT_URL = '/'
SOCIALACCOUNT_EMAIL_VERIFICATION = False
+# Rest framework
+
+REST_FRAMEWORK = {
+ 'DEFAULT_PARSER_CLASSES': (
+ 'rest_framework.parsers.JSONParser',
+ )
+}
+
# Euth settings
COMMENTABLES = (
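For readers skimming this record: the JavaScript half of the diff makes `_sendRequest` always send `JSON.stringify(data)` with a JSON content type, and the settings half gives Django REST Framework a matching JSON parser. A minimal Python illustration of why the two ends must agree, with an invented payload shape:

```python
import json
from urllib.parse import urlencode

# Illustrative payload, shaped like what ParagraphBox submits (names assumed):
payload = {"module": 1, "paragraphs": [{"name": "intro", "text": "hello"}]}

# Form encoding (roughly what jQuery's default 'data' handling produces)
# flattens nested structures, so a form parser cannot reassemble them:
print(urlencode(payload))  # paragraphs=%5B%7B%27name%27... (mangled list repr)

# A JSON body keeps the nesting intact; rest_framework.parsers.JSONParser,
# enabled by the new REST_FRAMEWORK setting, decodes it on the server:
print(json.dumps(payload))
```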
|
pre-commit__pre-commit-376 | Newly gitignored files (which still exist on disk) are linted
(they should not be)
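A minimal way to see the bug (hedged sketch; the repo and file name are invented): a tracked file that has just been gitignored and removed from the index with `git rm --cached` still exists on disk, but shows up as a deleted (`D`) entry in the staged diff, so a plain name-only listing keeps returning it. Restricting the diff to every status except `D` (exactly what the fix below does with `--diff-filter=ACMRTUXB`) drops it:

```python
import subprocess

def staged_files(*extra):
    # Same git invocation pre_commit.git.get_staged_files wraps.
    out = subprocess.check_output(
        ('git', 'diff', '--staged', '--name-only') + extra)
    return out.decode('UTF-8').splitlines()

# Assumes a repo where 'newly_ignored' is a tracked file; un-track it:
subprocess.check_call(('git', 'rm', '--cached', 'newly_ignored'))

print(staged_files())                          # still lists 'newly_ignored'
print(staged_files('--diff-filter=ACMRTUXB'))  # deleted entry excluded
```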
| [
{
"content": "from __future__ import unicode_literals\n\nimport functools\nimport logging\nimport os\nimport os.path\nimport re\n\nfrom pre_commit.errors import FatalError\nfrom pre_commit.util import CalledProcessError\nfrom pre_commit.util import cmd_output\nfrom pre_commit.util import memoize_by_cwd\n\n\nlogger = logging.getLogger('pre_commit')\n\n\ndef get_root():\n try:\n return cmd_output('git', 'rev-parse', '--show-toplevel')[1].strip()\n except CalledProcessError:\n raise FatalError(\n 'Called from outside of the gits. Please cd to a git repository.'\n )\n\n\ndef get_git_dir(git_root):\n return os.path.normpath(os.path.join(\n git_root,\n cmd_output('git', 'rev-parse', '--git-dir', cwd=git_root)[1].strip(),\n ))\n\n\ndef is_in_merge_conflict():\n git_dir = get_git_dir('.')\n return (\n os.path.exists(os.path.join(git_dir, 'MERGE_MSG')) and\n os.path.exists(os.path.join(git_dir, 'MERGE_HEAD'))\n )\n\n\ndef parse_merge_msg_for_conflicts(merge_msg):\n # Conflicted files start with tabs\n return [\n line.lstrip('#').strip()\n for line in merge_msg.splitlines()\n # '#\\t' for git 2.4.1\n if line.startswith(('\\t', '#\\t'))\n ]\n\n\n@memoize_by_cwd\ndef get_conflicted_files():\n logger.info('Checking merge-conflict files only.')\n # Need to get the conflicted files from the MERGE_MSG because they could\n # have resolved the conflict by choosing one side or the other\n merge_msg = open(os.path.join(get_git_dir('.'), 'MERGE_MSG')).read()\n merge_conflict_filenames = parse_merge_msg_for_conflicts(merge_msg)\n\n # This will get the rest of the changes made after the merge.\n # If they resolved the merge conflict by choosing a mesh of both sides\n # this will also include the conflicted files\n tree_hash = cmd_output('git', 'write-tree')[1].strip()\n merge_diff_filenames = cmd_output(\n 'git', 'diff', '-m', tree_hash, 'HEAD', 'MERGE_HEAD', '--name-only',\n )[1].splitlines()\n return set(merge_conflict_filenames) | set(merge_diff_filenames)\n\n\n@memoize_by_cwd\ndef get_staged_files():\n return cmd_output('git', 'diff', '--staged', '--name-only')[1].splitlines()\n\n\n@memoize_by_cwd\ndef get_all_files():\n return cmd_output('git', 'ls-files')[1].splitlines()\n\n\ndef get_files_matching(all_file_list_strategy):\n @functools.wraps(all_file_list_strategy)\n @memoize_by_cwd\n def wrapper(include_expr, exclude_expr):\n include_regex = re.compile(include_expr)\n exclude_regex = re.compile(exclude_expr)\n return set(\n filename\n for filename in all_file_list_strategy()\n if (\n include_regex.search(filename) and\n not exclude_regex.search(filename) and\n os.path.lexists(filename)\n )\n )\n return wrapper\n\n\nget_staged_files_matching = get_files_matching(get_staged_files)\nget_all_files_matching = get_files_matching(get_all_files)\nget_conflicted_files_matching = get_files_matching(get_conflicted_files)\n",
"path": "pre_commit/git.py"
}
] | [
{
"content": "from __future__ import unicode_literals\n\nimport functools\nimport logging\nimport os\nimport os.path\nimport re\n\nfrom pre_commit.errors import FatalError\nfrom pre_commit.util import CalledProcessError\nfrom pre_commit.util import cmd_output\nfrom pre_commit.util import memoize_by_cwd\n\n\nlogger = logging.getLogger('pre_commit')\n\n\ndef get_root():\n try:\n return cmd_output('git', 'rev-parse', '--show-toplevel')[1].strip()\n except CalledProcessError:\n raise FatalError(\n 'Called from outside of the gits. Please cd to a git repository.'\n )\n\n\ndef get_git_dir(git_root):\n return os.path.normpath(os.path.join(\n git_root,\n cmd_output('git', 'rev-parse', '--git-dir', cwd=git_root)[1].strip(),\n ))\n\n\ndef is_in_merge_conflict():\n git_dir = get_git_dir('.')\n return (\n os.path.exists(os.path.join(git_dir, 'MERGE_MSG')) and\n os.path.exists(os.path.join(git_dir, 'MERGE_HEAD'))\n )\n\n\ndef parse_merge_msg_for_conflicts(merge_msg):\n # Conflicted files start with tabs\n return [\n line.lstrip('#').strip()\n for line in merge_msg.splitlines()\n # '#\\t' for git 2.4.1\n if line.startswith(('\\t', '#\\t'))\n ]\n\n\n@memoize_by_cwd\ndef get_conflicted_files():\n logger.info('Checking merge-conflict files only.')\n # Need to get the conflicted files from the MERGE_MSG because they could\n # have resolved the conflict by choosing one side or the other\n merge_msg = open(os.path.join(get_git_dir('.'), 'MERGE_MSG')).read()\n merge_conflict_filenames = parse_merge_msg_for_conflicts(merge_msg)\n\n # This will get the rest of the changes made after the merge.\n # If they resolved the merge conflict by choosing a mesh of both sides\n # this will also include the conflicted files\n tree_hash = cmd_output('git', 'write-tree')[1].strip()\n merge_diff_filenames = cmd_output(\n 'git', 'diff', '-m', tree_hash, 'HEAD', 'MERGE_HEAD', '--name-only',\n )[1].splitlines()\n return set(merge_conflict_filenames) | set(merge_diff_filenames)\n\n\n@memoize_by_cwd\ndef get_staged_files():\n return cmd_output(\n 'git', 'diff', '--staged', '--name-only',\n # Everything except for D\n '--diff-filter=ACMRTUXB'\n )[1].splitlines()\n\n\n@memoize_by_cwd\ndef get_all_files():\n return cmd_output('git', 'ls-files')[1].splitlines()\n\n\ndef get_files_matching(all_file_list_strategy):\n @functools.wraps(all_file_list_strategy)\n @memoize_by_cwd\n def wrapper(include_expr, exclude_expr):\n include_regex = re.compile(include_expr)\n exclude_regex = re.compile(exclude_expr)\n return set(\n filename\n for filename in all_file_list_strategy()\n if (\n include_regex.search(filename) and\n not exclude_regex.search(filename) and\n os.path.lexists(filename)\n )\n )\n return wrapper\n\n\nget_staged_files_matching = get_files_matching(get_staged_files)\nget_all_files_matching = get_files_matching(get_all_files)\nget_conflicted_files_matching = get_files_matching(get_conflicted_files)\n",
"path": "pre_commit/git.py"
}
] | diff --git a/pre_commit/git.py b/pre_commit/git.py
index 796a0b8ae..1f16b6e0d 100644
--- a/pre_commit/git.py
+++ b/pre_commit/git.py
@@ -69,7 +69,11 @@ def get_conflicted_files():
@memoize_by_cwd
def get_staged_files():
- return cmd_output('git', 'diff', '--staged', '--name-only')[1].splitlines()
+ return cmd_output(
+ 'git', 'diff', '--staged', '--name-only',
+ # Everything except for D
+ '--diff-filter=ACMRTUXB'
+ )[1].splitlines()
@memoize_by_cwd
diff --git a/tests/git_test.py b/tests/git_test.py
index c4e014500..701d36b48 100644
--- a/tests/git_test.py
+++ b/tests/git_test.py
@@ -33,6 +33,16 @@ def test_get_root_not_git_dir(tempdir_factory):
git.get_root()
+def test_get_staged_files_deleted(tempdir_factory):
+ path = git_dir(tempdir_factory)
+ with cwd(path):
+ open('test', 'a').close()
+ cmd_output('git', 'add', 'test')
+ cmd_output('git', 'commit', '-m', 'foo', '--allow-empty')
+ cmd_output('git', 'rm', '--cached', 'test')
+ assert git.get_staged_files() == []
+
+
def test_is_not_in_merge_conflict(tempdir_factory):
path = git_dir(tempdir_factory)
with cwd(path):
|
conda__conda-build-1716 | UnboundLocalError with --skip-existing and --no-locking flags
Hit this today on conda-build 2.1.2. Also tried with the tip of master and I get the same result. For reproducibility, this is the output of trying to build the conda.recipe folder inside of conda-build itself:
```
$ conda build conda.recipe --no-locking --skip-existing
Cloning into '/home/edill/miniconda/conda-bld/conda.recipe_1485803296268/work'...
done.
checkout: 'HEAD'
Your branch is up-to-date with 'origin/_conda_cache_origin_head'.
==> git log -n1 <==
commit 6922ec3ed1afc287a4cd7f3872572f2bef89d892
Merge: 837fbc8 c82ea9b
Author: Mike Sarahan <[email protected]>
Date: Mon Jan 30 11:38:01 2017 -0600
Merge pull request #1704 from jerowe/feature/fix-perl-build
adding some fixes to cpan skeleton
==> git describe --tags --dirty <==
2.1.2-20-g6922ec3
==> git status <==
On branch _conda_cache_origin_head
Your branch is up-to-date with 'origin/_conda_cache_origin_head'.
nothing to commit, working directory clean
updating index in: /home/edill/miniconda/conda-bld/linux-64
Traceback (most recent call last):
File "/home/edill/miniconda/bin/conda-build", line 11, in <module>
load_entry_point('conda-build', 'console_scripts', 'conda-build')()
File "/home/edill/dev/conda/conda-build/conda_build/cli/main_build.py", line 322, in main
execute(sys.argv[1:])
File "/home/edill/dev/conda/conda-build/conda_build/cli/main_build.py", line 313, in execute
noverify=args.no_verify)
File "/home/edill/dev/conda/conda-build/conda_build/api.py", line 97, in build
need_source_download=need_source_download, config=config)
File "/home/edill/dev/conda/conda-build/conda_build/build.py", line 1478, in build_tree
config=config)
File "/home/edill/dev/conda/conda-build/conda_build/build.py", line 928, in build
package_exists = is_package_built(m, config)
File "/home/edill/dev/conda/conda-build/conda_build/build.py", line 1633, in is_package_built
update_index(d, config, could_be_mirror=False)
File "/home/edill/dev/conda/conda-build/conda_build/index.py", line 83, in update_index
with try_acquire_locks(locks, config.timeout):
UnboundLocalError: local variable 'locks' referenced before assignment
```
And some debug info
```
$ conda info
Current conda install:
platform : linux-64
conda version : 4.2.13
conda is private : False
conda-env version : 4.2.13
conda-build version : 2.1.2+20.g6922ec3
python version : 3.5.3.final.0
requests version : 2.13.0
root environment : /home/edill/miniconda (writable)
default environment : /home/edill/miniconda
envs directories : /home/edill/miniconda/envs
package cache : /home/edill/miniconda/pkgs
channel URLs : ...
config file : /home/edill/.condarc
offline mode : False
```
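Stripped of conda-build specifics, the traceback is the classic conditionally-bound-local pattern. A self-contained sketch (the lock value and context manager are placeholders, not conda-build's real objects), whose fixed variant has exactly the shape of the patch below:

```python
from contextlib import contextmanager

@contextmanager
def try_acquire_locks(locks, timeout):
    # Placeholder for conda_build.utils.try_acquire_locks.
    yield

def update_index(locking, timeout=90):
    if locking:
        locks = ['<lock>']   # bound only when locking is enabled
    # With locking=False (--no-locking) 'locks' was never assigned:
    with try_acquire_locks(locks, timeout):  # raises UnboundLocalError
        pass

def update_index_fixed(locking, timeout=90):
    locks = []               # always bound; empty list means nothing to acquire
    if locking:
        locks.append('<lock>')
    with try_acquire_locks(locks, timeout):
        pass

update_index_fixed(False)    # fine
update_index(False)          # UnboundLocalError, as in the traceback above
```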
| [
{
"content": "'''\nFunctions related to creating repodata index files.\n'''\n\nfrom __future__ import absolute_import, division, print_function\n\nimport os\nimport bz2\nimport sys\nimport json\nimport tarfile\nfrom os.path import isfile, join, getmtime\n\nfrom conda_build.utils import file_info, get_lock, try_acquire_locks\nfrom .conda_interface import PY3, md5_file\n\n\ndef read_index_tar(tar_path, config, lock):\n \"\"\" Returns the index.json dict inside the given package tarball. \"\"\"\n if config.locking:\n locks = [lock]\n with try_acquire_locks(locks, config.timeout):\n with tarfile.open(tar_path) as t:\n try:\n return json.loads(t.extractfile('info/index.json').read().decode('utf-8'))\n except EOFError:\n raise RuntimeError(\"Could not extract %s. File probably corrupt.\"\n % tar_path)\n except OSError as e:\n raise RuntimeError(\"Could not extract %s (%s)\" % (tar_path, e))\n except tarfile.ReadError:\n raise RuntimeError(\"Could not extract metadata from %s. \"\n \"File probably corrupt.\" % tar_path)\n\n\ndef write_repodata(repodata, dir_path, lock, config=None):\n \"\"\" Write updated repodata.json and repodata.json.bz2 \"\"\"\n if not config:\n import conda_build.config\n config = conda_build.config.config\n if config.locking:\n locks = [lock]\n with try_acquire_locks(locks, config.timeout):\n data = json.dumps(repodata, indent=2, sort_keys=True)\n # strip trailing whitespace\n data = '\\n'.join(line.rstrip() for line in data.splitlines())\n # make sure we have newline at the end\n if not data.endswith('\\n'):\n data += '\\n'\n with open(join(dir_path, 'repodata.json'), 'w') as fo:\n fo.write(data)\n with open(join(dir_path, 'repodata.json.bz2'), 'wb') as fo:\n fo.write(bz2.compress(data.encode('utf-8')))\n\n\ndef update_index(dir_path, config, force=False, check_md5=False, remove=True, lock=None,\n could_be_mirror=True):\n \"\"\"\n Update all index files in dir_path with changed packages.\n\n :param verbose: Should detailed status messages be output?\n :type verbose: bool\n :param force: Whether to re-index all packages (including those that\n haven't changed) or not.\n :type force: bool\n :param check_md5: Whether to check MD5s instead of mtimes for determining\n if a package changed.\n :type check_md5: bool\n \"\"\"\n\n if config.verbose:\n print(\"updating index in:\", dir_path)\n index_path = join(dir_path, '.index.json')\n if not os.path.isdir(dir_path):\n os.makedirs(dir_path)\n\n if not lock:\n lock = get_lock(dir_path)\n\n if config.locking:\n locks = [lock]\n\n with try_acquire_locks(locks, config.timeout):\n if force:\n index = {}\n else:\n try:\n mode_dict = {'mode': 'r', 'encoding': 'utf-8'} if PY3 else {'mode': 'rb'}\n with open(index_path, **mode_dict) as fi:\n index = json.load(fi)\n except (IOError, ValueError):\n index = {}\n\n files = set(fn for fn in os.listdir(dir_path) if fn.endswith('.tar.bz2'))\n if could_be_mirror and any(fn.startswith('_license-') for fn in files):\n sys.exit(\"\"\"\\\n Error:\n Indexing a copy of the Anaconda conda package channel is neither\n necessary nor supported. If you wish to add your own packages,\n you can do so by adding them to a separate channel.\n \"\"\")\n for fn in files:\n path = join(dir_path, fn)\n if fn in index:\n if check_md5:\n if index[fn]['md5'] == md5_file(path):\n continue\n elif index[fn]['mtime'] == getmtime(path):\n continue\n if config.verbose:\n print('updating:', fn)\n d = read_index_tar(path, config, lock=lock)\n d.update(file_info(path))\n index[fn] = d\n\n for fn in files:\n index[fn]['sig'] = '.' 
if isfile(join(dir_path, fn + '.sig')) else None\n\n if remove:\n # remove files from the index which are not on disk\n for fn in set(index) - files:\n if config.verbose:\n print(\"removing:\", fn)\n del index[fn]\n\n # Deal with Python 2 and 3's different json module type reqs\n mode_dict = {'mode': 'w', 'encoding': 'utf-8'} if PY3 else {'mode': 'wb'}\n with open(index_path, **mode_dict) as fo:\n json.dump(index, fo, indent=2, sort_keys=True, default=str)\n\n # --- new repodata\n for fn in index:\n info = index[fn]\n for varname in 'arch', 'platform', 'mtime', 'ucs':\n try:\n del info[varname]\n except KeyError:\n pass\n\n if 'requires' in info and 'depends' not in info:\n info['depends'] = info['requires']\n\n repodata = {'packages': index, 'info': {}}\n write_repodata(repodata, dir_path, lock=lock, config=config)\n",
"path": "conda_build/index.py"
}
] | [
{
"content": "'''\nFunctions related to creating repodata index files.\n'''\n\nfrom __future__ import absolute_import, division, print_function\n\nimport os\nimport bz2\nimport sys\nimport json\nimport tarfile\nfrom os.path import isfile, join, getmtime\n\nfrom conda_build.utils import file_info, get_lock, try_acquire_locks\nfrom .conda_interface import PY3, md5_file\n\n\ndef read_index_tar(tar_path, config, lock):\n \"\"\" Returns the index.json dict inside the given package tarball. \"\"\"\n if config.locking:\n locks = [lock]\n with try_acquire_locks(locks, config.timeout):\n with tarfile.open(tar_path) as t:\n try:\n return json.loads(t.extractfile('info/index.json').read().decode('utf-8'))\n except EOFError:\n raise RuntimeError(\"Could not extract %s. File probably corrupt.\"\n % tar_path)\n except OSError as e:\n raise RuntimeError(\"Could not extract %s (%s)\" % (tar_path, e))\n except tarfile.ReadError:\n raise RuntimeError(\"Could not extract metadata from %s. \"\n \"File probably corrupt.\" % tar_path)\n\n\ndef write_repodata(repodata, dir_path, lock, config=None):\n \"\"\" Write updated repodata.json and repodata.json.bz2 \"\"\"\n if not config:\n import conda_build.config\n config = conda_build.config.config\n if config.locking:\n locks = [lock]\n with try_acquire_locks(locks, config.timeout):\n data = json.dumps(repodata, indent=2, sort_keys=True)\n # strip trailing whitespace\n data = '\\n'.join(line.rstrip() for line in data.splitlines())\n # make sure we have newline at the end\n if not data.endswith('\\n'):\n data += '\\n'\n with open(join(dir_path, 'repodata.json'), 'w') as fo:\n fo.write(data)\n with open(join(dir_path, 'repodata.json.bz2'), 'wb') as fo:\n fo.write(bz2.compress(data.encode('utf-8')))\n\n\ndef update_index(dir_path, config, force=False, check_md5=False, remove=True, lock=None,\n could_be_mirror=True):\n \"\"\"\n Update all index files in dir_path with changed packages.\n\n :param verbose: Should detailed status messages be output?\n :type verbose: bool\n :param force: Whether to re-index all packages (including those that\n haven't changed) or not.\n :type force: bool\n :param check_md5: Whether to check MD5s instead of mtimes for determining\n if a package changed.\n :type check_md5: bool\n \"\"\"\n\n if config.verbose:\n print(\"updating index in:\", dir_path)\n index_path = join(dir_path, '.index.json')\n if not os.path.isdir(dir_path):\n os.makedirs(dir_path)\n\n if not lock:\n lock = get_lock(dir_path)\n\n locks = []\n if config.locking:\n locks.append(lock)\n\n with try_acquire_locks(locks, config.timeout):\n if force:\n index = {}\n else:\n try:\n mode_dict = {'mode': 'r', 'encoding': 'utf-8'} if PY3 else {'mode': 'rb'}\n with open(index_path, **mode_dict) as fi:\n index = json.load(fi)\n except (IOError, ValueError):\n index = {}\n\n files = set(fn for fn in os.listdir(dir_path) if fn.endswith('.tar.bz2'))\n if could_be_mirror and any(fn.startswith('_license-') for fn in files):\n sys.exit(\"\"\"\\\n Error:\n Indexing a copy of the Anaconda conda package channel is neither\n necessary nor supported. 
If you wish to add your own packages,\n you can do so by adding them to a separate channel.\n \"\"\")\n for fn in files:\n path = join(dir_path, fn)\n if fn in index:\n if check_md5:\n if index[fn]['md5'] == md5_file(path):\n continue\n elif index[fn]['mtime'] == getmtime(path):\n continue\n if config.verbose:\n print('updating:', fn)\n d = read_index_tar(path, config, lock=lock)\n d.update(file_info(path))\n index[fn] = d\n\n for fn in files:\n index[fn]['sig'] = '.' if isfile(join(dir_path, fn + '.sig')) else None\n\n if remove:\n # remove files from the index which are not on disk\n for fn in set(index) - files:\n if config.verbose:\n print(\"removing:\", fn)\n del index[fn]\n\n # Deal with Python 2 and 3's different json module type reqs\n mode_dict = {'mode': 'w', 'encoding': 'utf-8'} if PY3 else {'mode': 'wb'}\n with open(index_path, **mode_dict) as fo:\n json.dump(index, fo, indent=2, sort_keys=True, default=str)\n\n # --- new repodata\n for fn in index:\n info = index[fn]\n for varname in 'arch', 'platform', 'mtime', 'ucs':\n try:\n del info[varname]\n except KeyError:\n pass\n\n if 'requires' in info and 'depends' not in info:\n info['depends'] = info['requires']\n\n repodata = {'packages': index, 'info': {}}\n write_repodata(repodata, dir_path, lock=lock, config=config)\n",
"path": "conda_build/index.py"
}
] | diff --git a/conda_build/index.py b/conda_build/index.py
index 6260354f61..7e7c4dee19 100644
--- a/conda_build/index.py
+++ b/conda_build/index.py
@@ -77,8 +77,9 @@ def update_index(dir_path, config, force=False, check_md5=False, remove=True, lo
if not lock:
lock = get_lock(dir_path)
+ locks = []
if config.locking:
- locks = [lock]
+ locks.append(lock)
with try_acquire_locks(locks, config.timeout):
if force:
|
djangopackages__djangopackages-851 | Packages with custom git repos are not being scored
See this tweet: https://twitter.com/morenoh149/status/1580971411145125888
Package scoring should factor in packages that exist on PyPI but have a custom repo location. They appear to be scored as 0 and therefore won't show up in Grids.
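The eventual fix is a one-character boundary change, easy to miss in the diff below; a sketch of the off-by-one (threshold assumed to be 0, which is what `max(0, settings.PACKAGE_SCORE_MIN)` yields when the setting is 0):

```python
# Packages whose repo host is not directly supported appear to end up
# with score == 0 (package names invented for the example).
scores = {"django-fancy": 87, "django-custom-repo": 0}
threshold = 0  # max(0, settings.PACKAGE_SCORE_MIN), assuming the setting is 0

# Strict greater-than (old queryset, score__gt): zero-scored packages vanish.
print([name for name, s in scores.items() if s > threshold])

# Greater-or-equal (new queryset, score__gte): they stay on the grid.
print([name for name, s in scores.items() if s >= threshold])
```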
| [
{
"content": "\"\"\"views for the :mod:`grid` app\"\"\"\n\nimport json\n\nfrom django.conf import settings\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import login_required, permission_required\nfrom django.db.models import Count, Q\nfrom django.http import HttpResponseRedirect, Http404, HttpResponseForbidden\nfrom django.shortcuts import get_object_or_404, render\nfrom django.urls import reverse\nfrom django_tables2 import SingleTableView\nfrom rest_framework.generics import ListAPIView, RetrieveAPIView\n\nfrom grid.forms import ElementForm, FeatureForm, GridForm, GridPackageForm\nfrom grid.models import Element, Feature, Grid, GridPackage\nfrom grid.tables import GridTable\nfrom package.models import Package\nfrom package.forms import PackageForm\nfrom package.views import repo_data_for_js\n\n\ndef build_element_map(elements):\n # Horrifying two-level dict due to needing to use hash() function later\n element_map = {}\n for element in elements:\n element_map.setdefault(element.feature_id, {})\n element_map[element.feature_id][element.grid_package_id] = element\n return element_map\n\n\nclass GridListView(SingleTableView):\n table_class = GridTable\n template_name = \"grid/grids.html\"\n paginate_by = 100\n\n def get_queryset(self):\n return (\n Grid.objects.filter()\n .prefetch_related(\"feature_set\")\n .annotate(gridpackage_count=Count(\"gridpackage\"))\n .filter(gridpackage_count__gt=0)\n .order_by(\"-modified\", \"title\")\n )\n\n\n@login_required\ndef add_grid(request, template_name=\"grid/update_grid.html\"):\n \"\"\"Creates a new grid, requires user to be logged in.\n Works for both GET and POST request methods\n\n Template context:\n\n * ``form`` - an instance of :class:`~app.grid.forms.GridForm`\n \"\"\"\n\n if not request.user.profile.can_add_grid:\n return HttpResponseForbidden(\"permission denied\")\n\n new_grid = Grid()\n form = GridForm(request.POST or None, instance=new_grid)\n\n if form.is_valid():\n new_grid = form.save()\n return HttpResponseRedirect(reverse(\"grid\", kwargs={\"slug\": new_grid.slug}))\n\n return render(request, template_name, {\"form\": form})\n\n\n@login_required\ndef edit_grid(request, slug, template_name=\"grid/update_grid.html\"):\n \"\"\"View to modify the grid, handles GET and POST requests.\n This view requires user to be logged in.\n\n Template context:\n\n * ``form`` - instance of :class:`grid.forms.GridForm`\n \"\"\"\n\n if not request.user.profile.can_edit_grid:\n return HttpResponseForbidden(\"permission denied\")\n\n grid = get_object_or_404(Grid, slug=slug)\n form = GridForm(request.POST or None, instance=grid)\n\n if form.is_valid():\n grid = form.save()\n message = \"Grid has been edited\"\n messages.add_message(request, messages.INFO, message)\n return HttpResponseRedirect(reverse(\"grid\", kwargs={\"slug\": grid.slug}))\n return render(request, template_name, {\"form\": form, \"grid\": grid})\n\n\n@login_required\ndef add_feature(request, grid_slug, template_name=\"grid/update_feature.html\"):\n \"\"\"Adds a feature to the grid, accepts GET and POST requests.\n\n Requires user to be logged in\n\n Template context:\n\n * ``form`` - instance of :class:`grid.forms.FeatureForm` form\n * ``grid`` - instance of :class:`grid.models.Grid` model\n \"\"\"\n\n if not request.user.profile.can_add_grid_feature:\n return HttpResponseForbidden(\"permission denied\")\n\n grid = get_object_or_404(Grid, slug=grid_slug)\n form = FeatureForm(request.POST or None)\n\n if form.is_valid():\n feature = form.save(commit=False)\n 
feature.grid = grid\n feature.save()\n return HttpResponseRedirect(reverse(\"grid\", kwargs={\"slug\": feature.grid.slug}))\n\n return render(request, template_name, {\"form\": form, \"grid\": grid})\n\n\n@login_required\ndef edit_feature(request, id, template_name=\"grid/update_feature.html\"):\n \"\"\"edits feature on a grid - this view has the same\n semantics as :func:`grid.views.add_feature`.\n\n Requires the user to be logged in.\n \"\"\"\n\n if not request.user.profile.can_edit_grid_feature:\n return HttpResponseForbidden(\"permission denied\")\n\n feature = get_object_or_404(Feature, id=id)\n form = FeatureForm(request.POST or None, instance=feature)\n\n if form.is_valid():\n feature = form.save()\n return HttpResponseRedirect(reverse(\"grid\", kwargs={\"slug\": feature.grid.slug}))\n\n return render(request, template_name, {\"form\": form, \"grid\": feature.grid})\n\n\n@permission_required(\"grid.delete_feature\")\ndef delete_feature(request, id, template_name=\"grid/edit_feature.html\"):\n # do not need to check permission via profile because\n # we default to being strict about deleting\n \"\"\"deletes a feature from the grid, ``id`` is id of the\n :class:`grid.models.Feature` model that is to be deleted\n\n Requires permission `grid.delete_feature`.\n\n Redirects to the parent :func:`grid.views.grid_detail`\n \"\"\"\n\n feature = get_object_or_404(Feature, id=id)\n Element.objects.filter(feature=feature).delete()\n feature.delete()\n\n return HttpResponseRedirect(reverse(\"grid\", kwargs={\"slug\": feature.grid.slug}))\n\n\n@permission_required(\"grid.delete_gridpackage\")\ndef delete_grid_package(request, id, template_name=\"grid/edit_feature.html\"):\n \"\"\"Deletes package from the grid, ``id`` is the id of the\n :class:`grid.models.GridPackage` instance\n\n Requires permission ``grid.delete_gridpackage``.\n\n Redirects to :func:`grid.views.grid_detail`.\n \"\"\"\n\n # do not need to check permission via profile because\n # we default to being strict about deleting\n grid_package = get_object_or_404(GridPackage, id=id)\n grid_package.grid.clear_detail_template_cache()\n Element.objects.filter(grid_package=grid_package).delete()\n grid_package.delete()\n\n return HttpResponseRedirect(\n reverse(\"grid\", kwargs={\"slug\": grid_package.grid.slug})\n )\n\n\n@login_required\ndef edit_element(\n request, feature_id, package_id, template_name=\"grid/edit_element.html\"\n):\n\n if not request.user.profile.can_edit_grid_element:\n return HttpResponseForbidden(\"permission denied\")\n\n feature = get_object_or_404(Feature, pk=feature_id)\n grid_package = get_object_or_404(GridPackage, pk=package_id)\n\n # Sanity check to make sure both the feature and grid_package are related to\n # the same grid!\n if feature.grid_id != grid_package.grid_id:\n raise Http404\n\n element, created = Element.objects.get_or_create(\n grid_package=grid_package, feature=feature\n )\n\n form = ElementForm(request.POST or None, instance=element)\n\n if form.is_valid():\n element = form.save()\n return HttpResponseRedirect(reverse(\"grid\", kwargs={\"slug\": feature.grid.slug}))\n\n return render(\n request,\n template_name,\n {\n \"form\": form,\n \"feature\": feature,\n \"package\": grid_package.package,\n \"grid\": feature.grid,\n },\n )\n\n\n@login_required\ndef add_grid_package(request, grid_slug, template_name=\"grid/add_grid_package.html\"):\n \"\"\"Add an existing package to this grid.\"\"\"\n\n if not request.user.profile.can_add_grid_package:\n return HttpResponseForbidden(\"permission denied\")\n\n 
grid = get_object_or_404(Grid, slug=grid_slug)\n grid_package = GridPackage()\n form = GridPackageForm(request.POST or None, instance=grid_package)\n\n if form.is_valid():\n package = get_object_or_404(Package, id=request.POST[\"package\"])\n try:\n GridPackage.objects.get(grid=grid, package=package)\n message = \"Sorry, but '%s' is already in this grid.\" % package.title\n messages.add_message(request, messages.ERROR, message)\n except GridPackage.DoesNotExist:\n grid_package = GridPackage(grid=grid, package=package)\n grid_package.save()\n grid.clear_detail_template_cache()\n redirect = request.POST.get(\"redirect\", \"\")\n if redirect:\n return HttpResponseRedirect(redirect)\n\n return HttpResponseRedirect(reverse(\"grid\", kwargs={\"slug\": grid.slug}))\n\n return render(request, template_name, {\"form\": form, \"grid\": grid})\n\n\n@login_required\ndef add_new_grid_package(request, grid_slug, template_name=\"package/package_form.html\"):\n \"\"\"Add a package to a grid that isn't yet represented on the site.\"\"\"\n\n if not request.user.profile.can_add_grid_package:\n return HttpResponseForbidden(\"permission denied\")\n\n grid = get_object_or_404(Grid, slug=grid_slug)\n\n new_package = Package()\n form = PackageForm(request.POST or None, instance=new_package)\n\n if form.is_valid():\n new_package = form.save()\n GridPackage.objects.create(grid=grid, package=new_package)\n return HttpResponseRedirect(reverse(\"grid\", kwargs={\"slug\": grid_slug}))\n\n return render(\n request,\n template_name,\n {\"form\": form, \"repo_data\": repo_data_for_js(), \"action\": \"add\"},\n )\n\n\ndef ajax_grid_list(request, template_name=\"grid/ajax_grid_list.html\"):\n q = request.GET.get(\"q\", \"\")\n grids = []\n if q:\n grids = Grid.objects.filter(title__istartswith=q)\n package_id = request.GET.get(\"package_id\", \"\")\n if package_id:\n grids = grids.exclude(gridpackage__package__id=package_id)\n return render(request, template_name, {\"grids\": grids})\n\n\ndef grid_detail(request, slug, template_name=\"grid/grid_detail.html\"):\n \"\"\"displays a grid in detail\n\n Template context:\n\n * ``grid`` - the grid object\n * ``elements`` - elements of the grid\n * ``features`` - feature set used in the grid\n * ``grid_packages`` - packages involved in the current grid\n \"\"\"\n grid = get_object_or_404(Grid, slug=slug)\n\n # features = grid.feature_set.select_related(None)\n features = Feature.objects.filter(grid=grid)\n\n filters = {\n \"python3\": request.GET.get(\"python3\") == \"1\",\n \"stable\": request.GET.get(\"stable\") == \"1\",\n }\n\n grid_packages = grid.grid_packages.select_related(\"package\").filter(\n package__score__gt=max(0, settings.PACKAGE_SCORE_MIN)\n )\n\n if filters.get(\"python3\"):\n grid_packages = grid_packages.filter(package__version__supports_python3=True)\n\n if filters.get(\"stable\"):\n grid_packages = grid_packages.filter(package__version__development_status=5)\n\n grid_packages = grid_packages.order_by(\"-package__score\")\n\n elements = Element.objects.filter(\n feature__in=features, grid_package__in=grid_packages\n )\n\n element_map = build_element_map(elements)\n\n # These attributes are how we determine what is displayed in the grid\n default_attributes = [\n (\"repo_description\", \"Description\"),\n (\"category\", \"Category\"),\n (\"pypi_downloads\", \"Downloads\"),\n (\"last_updated\", \"Last Updated\"),\n (\"pypi_version\", \"Version\"),\n (\"repo\", \"Repo\"),\n (\"commits_over_52\", \"Commits\"),\n (\"repo_watchers\", \"Stars\"),\n (\"repo_forks\", 
\"Forks\"),\n (\"participant_list\", \"Participants\"),\n (\"license_latest\", \"License\"),\n ]\n\n return render(\n request,\n template_name,\n {\n \"filters\": json.dumps(sorted(filters.items()), separators=(\",\", \":\")),\n \"grid\": grid,\n \"features\": features,\n \"grid_packages\": grid_packages,\n \"attributes\": default_attributes,\n \"elements\": element_map,\n },\n )\n\n\ndef grid_detail_landscape(\n request, slug, template_name=\"grid/grid_detail_landscape.html\"\n):\n \"\"\"displays a grid in detail\n\n Template context:\n\n * ``grid`` - the grid object\n * ``elements`` - elements of the grid\n * ``features`` - feature set used in the grid\n * ``grid_packages`` - packages involved in the current grid\n \"\"\"\n\n return grid_detail(request, slug, template_name=\"grid/grid_detail_landscape.html\")\n\n\nclass GridListAPIView(ListAPIView):\n model = Grid\n paginate_by = 20\n\n\nclass GridDetailAPIView(RetrieveAPIView):\n model = Grid\n\n\ndef grid_timesheet(request, slug, template_name=\"grid/grid_timesheet.html\"):\n grid = get_object_or_404(Grid, slug=slug)\n grid_packages = grid.grid_packages.order_by(\"-package__modified\").select_related()\n\n return render(\n request,\n template_name,\n {\n \"grid\": grid,\n \"grid_packages\": grid_packages,\n },\n )\n",
"path": "grid/views.py"
}
] | [
{
"content": "\"\"\"views for the :mod:`grid` app\"\"\"\n\nimport json\n\nfrom django.conf import settings\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import login_required, permission_required\nfrom django.db.models import Count, Q\nfrom django.http import HttpResponseRedirect, Http404, HttpResponseForbidden\nfrom django.shortcuts import get_object_or_404, render\nfrom django.urls import reverse\nfrom django_tables2 import SingleTableView\nfrom rest_framework.generics import ListAPIView, RetrieveAPIView\n\nfrom grid.forms import ElementForm, FeatureForm, GridForm, GridPackageForm\nfrom grid.models import Element, Feature, Grid, GridPackage\nfrom grid.tables import GridTable\nfrom package.models import Package\nfrom package.forms import PackageForm\nfrom package.views import repo_data_for_js\n\n\ndef build_element_map(elements):\n # Horrifying two-level dict due to needing to use hash() function later\n element_map = {}\n for element in elements:\n element_map.setdefault(element.feature_id, {})\n element_map[element.feature_id][element.grid_package_id] = element\n return element_map\n\n\nclass GridListView(SingleTableView):\n table_class = GridTable\n template_name = \"grid/grids.html\"\n paginate_by = 100\n\n def get_queryset(self):\n return (\n Grid.objects.filter()\n .prefetch_related(\"feature_set\")\n .annotate(gridpackage_count=Count(\"gridpackage\"))\n .filter(gridpackage_count__gt=0)\n .order_by(\"-modified\", \"title\")\n )\n\n\n@login_required\ndef add_grid(request, template_name=\"grid/update_grid.html\"):\n \"\"\"Creates a new grid, requires user to be logged in.\n Works for both GET and POST request methods\n\n Template context:\n\n * ``form`` - an instance of :class:`~app.grid.forms.GridForm`\n \"\"\"\n\n if not request.user.profile.can_add_grid:\n return HttpResponseForbidden(\"permission denied\")\n\n new_grid = Grid()\n form = GridForm(request.POST or None, instance=new_grid)\n\n if form.is_valid():\n new_grid = form.save()\n return HttpResponseRedirect(reverse(\"grid\", kwargs={\"slug\": new_grid.slug}))\n\n return render(request, template_name, {\"form\": form})\n\n\n@login_required\ndef edit_grid(request, slug, template_name=\"grid/update_grid.html\"):\n \"\"\"View to modify the grid, handles GET and POST requests.\n This view requires user to be logged in.\n\n Template context:\n\n * ``form`` - instance of :class:`grid.forms.GridForm`\n \"\"\"\n\n if not request.user.profile.can_edit_grid:\n return HttpResponseForbidden(\"permission denied\")\n\n grid = get_object_or_404(Grid, slug=slug)\n form = GridForm(request.POST or None, instance=grid)\n\n if form.is_valid():\n grid = form.save()\n message = \"Grid has been edited\"\n messages.add_message(request, messages.INFO, message)\n return HttpResponseRedirect(reverse(\"grid\", kwargs={\"slug\": grid.slug}))\n return render(request, template_name, {\"form\": form, \"grid\": grid})\n\n\n@login_required\ndef add_feature(request, grid_slug, template_name=\"grid/update_feature.html\"):\n \"\"\"Adds a feature to the grid, accepts GET and POST requests.\n\n Requires user to be logged in\n\n Template context:\n\n * ``form`` - instance of :class:`grid.forms.FeatureForm` form\n * ``grid`` - instance of :class:`grid.models.Grid` model\n \"\"\"\n\n if not request.user.profile.can_add_grid_feature:\n return HttpResponseForbidden(\"permission denied\")\n\n grid = get_object_or_404(Grid, slug=grid_slug)\n form = FeatureForm(request.POST or None)\n\n if form.is_valid():\n feature = form.save(commit=False)\n 
feature.grid = grid\n feature.save()\n return HttpResponseRedirect(reverse(\"grid\", kwargs={\"slug\": feature.grid.slug}))\n\n return render(request, template_name, {\"form\": form, \"grid\": grid})\n\n\n@login_required\ndef edit_feature(request, id, template_name=\"grid/update_feature.html\"):\n \"\"\"edits feature on a grid - this view has the same\n semantics as :func:`grid.views.add_feature`.\n\n Requires the user to be logged in.\n \"\"\"\n\n if not request.user.profile.can_edit_grid_feature:\n return HttpResponseForbidden(\"permission denied\")\n\n feature = get_object_or_404(Feature, id=id)\n form = FeatureForm(request.POST or None, instance=feature)\n\n if form.is_valid():\n feature = form.save()\n return HttpResponseRedirect(reverse(\"grid\", kwargs={\"slug\": feature.grid.slug}))\n\n return render(request, template_name, {\"form\": form, \"grid\": feature.grid})\n\n\n@permission_required(\"grid.delete_feature\")\ndef delete_feature(request, id, template_name=\"grid/edit_feature.html\"):\n # do not need to check permission via profile because\n # we default to being strict about deleting\n \"\"\"deletes a feature from the grid, ``id`` is id of the\n :class:`grid.models.Feature` model that is to be deleted\n\n Requires permission `grid.delete_feature`.\n\n Redirects to the parent :func:`grid.views.grid_detail`\n \"\"\"\n\n feature = get_object_or_404(Feature, id=id)\n Element.objects.filter(feature=feature).delete()\n feature.delete()\n\n return HttpResponseRedirect(reverse(\"grid\", kwargs={\"slug\": feature.grid.slug}))\n\n\n@permission_required(\"grid.delete_gridpackage\")\ndef delete_grid_package(request, id, template_name=\"grid/edit_feature.html\"):\n \"\"\"Deletes package from the grid, ``id`` is the id of the\n :class:`grid.models.GridPackage` instance\n\n Requires permission ``grid.delete_gridpackage``.\n\n Redirects to :func:`grid.views.grid_detail`.\n \"\"\"\n\n # do not need to check permission via profile because\n # we default to being strict about deleting\n grid_package = get_object_or_404(GridPackage, id=id)\n grid_package.grid.clear_detail_template_cache()\n Element.objects.filter(grid_package=grid_package).delete()\n grid_package.delete()\n\n return HttpResponseRedirect(\n reverse(\"grid\", kwargs={\"slug\": grid_package.grid.slug})\n )\n\n\n@login_required\ndef edit_element(\n request, feature_id, package_id, template_name=\"grid/edit_element.html\"\n):\n\n if not request.user.profile.can_edit_grid_element:\n return HttpResponseForbidden(\"permission denied\")\n\n feature = get_object_or_404(Feature, pk=feature_id)\n grid_package = get_object_or_404(GridPackage, pk=package_id)\n\n # Sanity check to make sure both the feature and grid_package are related to\n # the same grid!\n if feature.grid_id != grid_package.grid_id:\n raise Http404\n\n element, created = Element.objects.get_or_create(\n grid_package=grid_package, feature=feature\n )\n\n form = ElementForm(request.POST or None, instance=element)\n\n if form.is_valid():\n element = form.save()\n return HttpResponseRedirect(reverse(\"grid\", kwargs={\"slug\": feature.grid.slug}))\n\n return render(\n request,\n template_name,\n {\n \"form\": form,\n \"feature\": feature,\n \"package\": grid_package.package,\n \"grid\": feature.grid,\n },\n )\n\n\n@login_required\ndef add_grid_package(request, grid_slug, template_name=\"grid/add_grid_package.html\"):\n \"\"\"Add an existing package to this grid.\"\"\"\n\n if not request.user.profile.can_add_grid_package:\n return HttpResponseForbidden(\"permission denied\")\n\n 
grid = get_object_or_404(Grid, slug=grid_slug)\n grid_package = GridPackage()\n form = GridPackageForm(request.POST or None, instance=grid_package)\n\n if form.is_valid():\n package = get_object_or_404(Package, id=request.POST[\"package\"])\n try:\n GridPackage.objects.get(grid=grid, package=package)\n message = \"Sorry, but '%s' is already in this grid.\" % package.title\n messages.add_message(request, messages.ERROR, message)\n except GridPackage.DoesNotExist:\n grid_package = GridPackage(grid=grid, package=package)\n grid_package.save()\n grid.clear_detail_template_cache()\n redirect = request.POST.get(\"redirect\", \"\")\n if redirect:\n return HttpResponseRedirect(redirect)\n\n return HttpResponseRedirect(reverse(\"grid\", kwargs={\"slug\": grid.slug}))\n\n return render(request, template_name, {\"form\": form, \"grid\": grid})\n\n\n@login_required\ndef add_new_grid_package(request, grid_slug, template_name=\"package/package_form.html\"):\n \"\"\"Add a package to a grid that isn't yet represented on the site.\"\"\"\n\n if not request.user.profile.can_add_grid_package:\n return HttpResponseForbidden(\"permission denied\")\n\n grid = get_object_or_404(Grid, slug=grid_slug)\n\n new_package = Package()\n form = PackageForm(request.POST or None, instance=new_package)\n\n if form.is_valid():\n new_package = form.save()\n GridPackage.objects.create(grid=grid, package=new_package)\n return HttpResponseRedirect(reverse(\"grid\", kwargs={\"slug\": grid_slug}))\n\n return render(\n request,\n template_name,\n {\"form\": form, \"repo_data\": repo_data_for_js(), \"action\": \"add\"},\n )\n\n\ndef ajax_grid_list(request, template_name=\"grid/ajax_grid_list.html\"):\n q = request.GET.get(\"q\", \"\")\n grids = []\n if q:\n grids = Grid.objects.filter(title__istartswith=q)\n package_id = request.GET.get(\"package_id\", \"\")\n if package_id:\n grids = grids.exclude(gridpackage__package__id=package_id)\n return render(request, template_name, {\"grids\": grids})\n\n\ndef grid_detail(request, slug, template_name=\"grid/grid_detail.html\"):\n \"\"\"displays a grid in detail\n\n Template context:\n\n * ``grid`` - the grid object\n * ``elements`` - elements of the grid\n * ``features`` - feature set used in the grid\n * ``grid_packages`` - packages involved in the current grid\n \"\"\"\n grid = get_object_or_404(Grid, slug=slug)\n\n # features = grid.feature_set.select_related(None)\n features = Feature.objects.filter(grid=grid)\n\n filters = {\n \"python3\": request.GET.get(\"python3\") == \"1\",\n \"stable\": request.GET.get(\"stable\") == \"1\",\n }\n\n grid_packages = grid.grid_packages.select_related(\"package\").filter(\n package__score__gte=max(0, settings.PACKAGE_SCORE_MIN)\n )\n\n if filters.get(\"python3\"):\n grid_packages = grid_packages.filter(package__version__supports_python3=True)\n\n if filters.get(\"stable\"):\n grid_packages = grid_packages.filter(package__version__development_status=5)\n\n grid_packages = grid_packages.order_by(\"-package__score\")\n\n elements = Element.objects.filter(\n feature__in=features, grid_package__in=grid_packages\n )\n\n element_map = build_element_map(elements)\n\n # These attributes are how we determine what is displayed in the grid\n default_attributes = [\n (\"repo_description\", \"Description\"),\n (\"category\", \"Category\"),\n (\"pypi_downloads\", \"Downloads\"),\n (\"last_updated\", \"Last Updated\"),\n (\"pypi_version\", \"Version\"),\n (\"repo\", \"Repo\"),\n (\"commits_over_52\", \"Commits\"),\n (\"repo_watchers\", \"Stars\"),\n (\"repo_forks\", 
\"Forks\"),\n (\"participant_list\", \"Participants\"),\n (\"license_latest\", \"License\"),\n ]\n\n return render(\n request,\n template_name,\n {\n \"filters\": json.dumps(sorted(filters.items()), separators=(\",\", \":\")),\n \"grid\": grid,\n \"features\": features,\n \"grid_packages\": grid_packages,\n \"attributes\": default_attributes,\n \"elements\": element_map,\n },\n )\n\n\ndef grid_detail_landscape(\n request, slug, template_name=\"grid/grid_detail_landscape.html\"\n):\n \"\"\"displays a grid in detail\n\n Template context:\n\n * ``grid`` - the grid object\n * ``elements`` - elements of the grid\n * ``features`` - feature set used in the grid\n * ``grid_packages`` - packages involved in the current grid\n \"\"\"\n\n return grid_detail(request, slug, template_name=\"grid/grid_detail_landscape.html\")\n\n\nclass GridListAPIView(ListAPIView):\n model = Grid\n paginate_by = 20\n\n\nclass GridDetailAPIView(RetrieveAPIView):\n model = Grid\n\n\ndef grid_timesheet(request, slug, template_name=\"grid/grid_timesheet.html\"):\n grid = get_object_or_404(Grid, slug=slug)\n grid_packages = grid.grid_packages.order_by(\"-package__modified\").select_related()\n\n return render(\n request,\n template_name,\n {\n \"grid\": grid,\n \"grid_packages\": grid_packages,\n },\n )\n",
"path": "grid/views.py"
}
] | diff --git a/grid/views.py b/grid/views.py
index d76c9f938..2f9a25dc8 100644
--- a/grid/views.py
+++ b/grid/views.py
@@ -304,7 +304,7 @@ def grid_detail(request, slug, template_name="grid/grid_detail.html"):
}
grid_packages = grid.grid_packages.select_related("package").filter(
- package__score__gt=max(0, settings.PACKAGE_SCORE_MIN)
+ package__score__gte=max(0, settings.PACKAGE_SCORE_MIN)
)
if filters.get("python3"):
diff --git a/package/tests/test_repos.py b/package/tests/test_repos.py
index 49176fbfd..0e214cc63 100644
--- a/package/tests/test_repos.py
+++ b/package/tests/test_repos.py
@@ -151,7 +151,8 @@ def test_base_handler_get_repo_for_repo_url():
sebpiq/spiteat/
schinckel/django-timedelta-field/
http://projects.unbit.it/hg/uwsgi
-http://www.dataportal.it"""
+http://www.dataportal.it
+https://hg.code.netlandish.com/~petersanchez/django-impersonate"""
for sample in samples.split("\n"):
assert isinstance(get_repo_for_repo_url(sample), UnsupportedHandler)
|
zigpy__zha-device-handlers-1073 | [Device Support Request] Tesla Smart Thermostatic Valve _TZE200_husqqvux TS0601
**Is your feature request related to a problem? Please describe.**
I'm trying to add the _TZE200_husqqvux TS0601 (Tesla Smart Thermostatic Valve), but ZHA shows no entities at all, not even a Climate entity. I tried adding the device to the quirk files by hand, but that doesn't seem to work.
**Describe the solution you'd like**
Plug-and-play support.
**Device signature**
{
"node_descriptor": "NodeDescriptor(logical_type=<LogicalType.EndDevice: 2>, complex_descriptor_available=0, user_descriptor_available=0, reserved=0, aps_flags=0, frequency_band=<FrequencyBand.Freq2400MHz: 8>, mac_capability_flags=<MACCapabilityFlags.AllocateAddress: 128>, manufacturer_code=4098, maximum_buffer_size=82, maximum_incoming_transfer_size=82, server_mask=11264, maximum_outgoing_transfer_size=82, descriptor_capability_field=<DescriptorCapability.NONE: 0>, *allocate_address=True, *is_alternate_pan_coordinator=False, *is_coordinator=False, *is_end_device=True, *is_full_function_device=False, *is_mains_powered=False, *is_receiver_on_when_idle=False, *is_router=False, *is_security_capable=False)",
"endpoints": {
"1": {
"profile_id": 260,
"device_type": "0x0051",
"in_clusters": [
"0x0000",
"0x0004",
"0x0005",
"0xef00"
],
"out_clusters": [
"0x000a",
"0x0019"
]
}
},
"manufacturer": "_TZE200_husqqvux",
"model": "TS0601",
"class": "zigpy.device.Device"
}
**Additional context**
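For anyone picking this up: the signature above matches the cluster layout handled by the existing ZONNSMART TV01-ZG quirk in zhaquirks/tuya/valve.py (device_type 0x0051, in_clusters 0x0000/0x0004/0x0005/0xef00, out_clusters 0x000a/0x0019). Below is a minimal sketch of what plug-and-play support could look like, under the unverified assumption that this valve is data-point compatible with that quirk; the class name is made up and nothing here is a confirmed fix.

```python
# Sketch only, not a confirmed fix. Assumes (unverified) that
# _TZE200_husqqvux uses the same Tuya data points as _TZE200_e9ba97vf;
# the reported pairing signature matches that quirk's clusters exactly.
from zigpy.profiles import zha
from zigpy.zcl.clusters.general import Basic, Groups, Ota, Scenes, Time

from zhaquirks.const import (
    DEVICE_TYPE,
    ENDPOINTS,
    INPUT_CLUSTERS,
    MODELS_INFO,
    OUTPUT_CLUSTERS,
    PROFILE_ID,
)
from zhaquirks.tuya import (
    TuyaManufClusterAttributes,
    TuyaPowerConfigurationCluster,
    TuyaThermostat,
)
from zhaquirks.tuya.valve import (
    ZONNSMARTManufCluster,
    ZONNSMARTThermostat,
    ZONNSMARTUserInterface,
)


class TeslaSmartTV01Clone(TuyaThermostat):
    """Hypothetical quirk claiming _TZE200_husqqvux as a TV01-ZG clone."""

    signature = {
        # Mirrors the pairing log above:
        # endpoint=1 profile=260 device_type=0x0051
        # in_clusters=[0x0000, 0x0004, 0x0005, 0xef00]
        # out_clusters=[0x000a, 0x0019]
        MODELS_INFO: [("_TZE200_husqqvux", "TS0601")],
        ENDPOINTS: {
            1: {
                PROFILE_ID: zha.PROFILE_ID,
                DEVICE_TYPE: zha.DeviceType.SMART_PLUG,
                INPUT_CLUSTERS: [
                    Basic.cluster_id,
                    Groups.cluster_id,
                    Scenes.cluster_id,
                    TuyaManufClusterAttributes.cluster_id,
                ],
                OUTPUT_CLUSTERS: [Time.cluster_id, Ota.cluster_id],
            }
        },
    }

    replacement = {
        ENDPOINTS: {
            1: {
                PROFILE_ID: zha.PROFILE_ID,
                DEVICE_TYPE: zha.DeviceType.THERMOSTAT,
                INPUT_CLUSTERS: [
                    Basic.cluster_id,
                    Groups.cluster_id,
                    Scenes.cluster_id,
                    ZONNSMARTManufCluster,
                    ZONNSMARTThermostat,
                    ZONNSMARTUserInterface,
                    TuyaPowerConfigurationCluster,
                ],
                OUTPUT_CLUSTERS: [Time.cluster_id, Ota.cluster_id],
            }
        }
    }
```

In practice one would more likely just append `("_TZE200_husqqvux", "TS0601")` to MODELS_INFO on the existing ZonnsmartTV01_ZG class rather than define a new one; if entities still fail to appear after that, the device presumably uses different Tuya data points and needs its own manufacturer-cluster mapping.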
| [
{
"content": "\"\"\"Map from manufacturer to standard clusters for thermostatic valves.\"\"\"\nimport logging\nfrom typing import Optional, Union\n\nfrom zigpy.profiles import zha\nimport zigpy.types as t\nfrom zigpy.zcl import foundation\nfrom zigpy.zcl.clusters.general import Basic, Groups, Identify, OnOff, Ota, Scenes, Time\nfrom zigpy.zcl.clusters.hvac import Thermostat\n\nfrom zhaquirks import Bus, LocalDataCluster\nfrom zhaquirks.const import (\n DEVICE_TYPE,\n ENDPOINTS,\n INPUT_CLUSTERS,\n MODELS_INFO,\n OUTPUT_CLUSTERS,\n PROFILE_ID,\n)\nfrom zhaquirks.tuya import (\n TuyaManufClusterAttributes,\n TuyaPowerConfigurationCluster,\n TuyaThermostat,\n TuyaThermostatCluster,\n TuyaUserInterfaceCluster,\n)\n\n# info from https://github.com/Koenkk/zigbee-herdsman-converters/blob/master/converters/common.js#L113\n# and https://github.com/Koenkk/zigbee-herdsman-converters/blob/master/converters/fromZigbee.js#L362\nSITERWELL_CHILD_LOCK_ATTR = 0x0107 # [0] unlocked [1] child-locked\nSITERWELL_WINDOW_DETECT_ATTR = 0x0112 # [0] inactive [1] active\nSITERWELL_VALVE_DETECT_ATTR = 0x0114 # [0] do not report [1] report\nSITERWELL_VALVE_STATE_ATTR = 0x026D # [0,0,0,55] opening percentage\nSITERWELL_TARGET_TEMP_ATTR = 0x0202 # [0,0,0,210] target room temp (decidegree)\nSITERWELL_TEMPERATURE_ATTR = 0x0203 # [0,0,0,200] current room temp (decidegree)\nSITERWELL_BATTERY_ATTR = 0x0215 # [0,0,0,98] battery charge\nSITERWELL_MODE_ATTR = 0x0404 # [0] off [1] scheduled [2] manual\n\n_LOGGER = logging.getLogger(__name__)\n\n\nclass SiterwellManufCluster(TuyaManufClusterAttributes):\n \"\"\"Manufacturer Specific Cluster of some thermostatic valves.\"\"\"\n\n manufacturer_attributes = {\n SITERWELL_CHILD_LOCK_ATTR: (\"child_lock\", t.uint8_t),\n SITERWELL_WINDOW_DETECT_ATTR: (\"window_detection\", t.uint8_t),\n SITERWELL_VALVE_DETECT_ATTR: (\"valve_detect\", t.uint8_t),\n SITERWELL_VALVE_STATE_ATTR: (\"valve_state\", t.uint32_t),\n SITERWELL_TARGET_TEMP_ATTR: (\"target_temperature\", t.uint32_t),\n SITERWELL_TEMPERATURE_ATTR: (\"temperature\", t.uint32_t),\n SITERWELL_BATTERY_ATTR: (\"battery\", t.uint32_t),\n SITERWELL_MODE_ATTR: (\"mode\", t.uint8_t),\n }\n\n TEMPERATURE_ATTRS = {\n SITERWELL_TEMPERATURE_ATTR: \"local_temp\",\n SITERWELL_TARGET_TEMP_ATTR: \"occupied_heating_setpoint\",\n }\n\n def _update_attribute(self, attrid, value):\n super()._update_attribute(attrid, value)\n if attrid in self.TEMPERATURE_ATTRS:\n self.endpoint.device.thermostat_bus.listener_event(\n \"temperature_change\",\n self.TEMPERATURE_ATTRS[attrid],\n value * 10, # decidegree to centidegree\n )\n elif attrid == SITERWELL_MODE_ATTR:\n self.endpoint.device.thermostat_bus.listener_event(\"mode_change\", value)\n self.endpoint.device.thermostat_bus.listener_event(\n \"state_change\", value > 0\n )\n elif attrid == SITERWELL_VALVE_STATE_ATTR:\n self.endpoint.device.thermostat_bus.listener_event(\"state_change\", value)\n elif attrid == SITERWELL_CHILD_LOCK_ATTR:\n mode = 1 if value else 0\n self.endpoint.device.ui_bus.listener_event(\"child_lock_change\", mode)\n elif attrid == SITERWELL_BATTERY_ATTR:\n self.endpoint.device.battery_bus.listener_event(\"battery_change\", value)\n\n\nclass SiterwellThermostat(TuyaThermostatCluster):\n \"\"\"Thermostat cluster for some thermostatic valves.\"\"\"\n\n def map_attribute(self, attribute, value):\n \"\"\"Map standardized attribute value to dict of manufacturer values.\"\"\"\n\n if attribute == \"occupied_heating_setpoint\":\n # centidegree to decidegree\n return {SITERWELL_TARGET_TEMP_ATTR: 
round(value / 10)}\n if attribute in (\"system_mode\", \"programing_oper_mode\"):\n if attribute == \"system_mode\":\n system_mode = value\n oper_mode = self._attr_cache.get(\n self.attridx[\"programing_oper_mode\"],\n self.ProgrammingOperationMode.Simple,\n )\n else:\n system_mode = self._attr_cache.get(\n self.attridx[\"system_mode\"], self.SystemMode.Heat\n )\n oper_mode = value\n if system_mode == self.SystemMode.Off:\n return {SITERWELL_MODE_ATTR: 0}\n if system_mode == self.SystemMode.Heat:\n if oper_mode == self.ProgrammingOperationMode.Schedule_programming_mode:\n return {SITERWELL_MODE_ATTR: 1}\n if oper_mode == self.ProgrammingOperationMode.Simple:\n return {SITERWELL_MODE_ATTR: 2}\n self.error(\"Unsupported value for ProgrammingOperationMode\")\n else:\n self.error(\"Unsupported value for SystemMode\")\n\n def mode_change(self, value):\n \"\"\"System Mode change.\"\"\"\n if value == 0:\n self._update_attribute(self.attridx[\"system_mode\"], self.SystemMode.Off)\n return\n\n if value == 1:\n mode = self.ProgrammingOperationMode.Schedule_programming_mode\n else:\n mode = self.ProgrammingOperationMode.Simple\n\n self._update_attribute(self.attridx[\"system_mode\"], self.SystemMode.Heat)\n self._update_attribute(self.attridx[\"programing_oper_mode\"], mode)\n\n\nclass SiterwellUserInterface(TuyaUserInterfaceCluster):\n \"\"\"HVAC User interface cluster for tuya electric heating thermostats.\"\"\"\n\n _CHILD_LOCK_ATTR = SITERWELL_CHILD_LOCK_ATTR\n\n\n# info from https://github.com/Koenkk/zigbee-herdsman-converters/blob/master/lib/tuya.js\n# and https://github.com/Koenkk/zigbee-herdsman-converters/blob/master/converters/fromZigbee.js#L2777\nMOES_TARGET_TEMP_ATTR = 0x0202 # target room temp (decidegree)\nMOES_TEMPERATURE_ATTR = 0x0203 # current room temp (decidegree)\nMOES_MODE_ATTR = 0x0404 # [0] away [1] scheduled [2] manual [3] comfort [4] eco [5] boost [6] complex\nMOES_CHILD_LOCK_ATTR = 0x0107 # [0] unlocked [1] child-locked\nMOES_VALVE_DETECT_ATTR = 0x0114 # [0] do not report [1] report\nMOES_TEMP_CALIBRATION_ATTR = 0x022C # temperature calibration (decidegree)\nMOES_MIN_TEMPERATURE_ATTR = 0x0266 # minimum limit of temperature setting (decidegree)\nMOES_MAX_TEMPERATURE_ATTR = 0x0267 # maximum limit of temperature setting (decidegree)\nMOES_WINDOW_DETECT_ATTR = 0x0068 # [0,35,5] on/off, temperature, operating time (min)\nMOES_BOOST_TIME_ATTR = 0x0269 # BOOST mode operating time in (sec)\nMOES_FORCE_VALVE_ATTR = 0x046A # [0] normal [1] open [2] close\nMOES_COMFORT_TEMP_ATTR = 0x026B # comfort mode temperaure (decidegree)\nMOES_ECO_TEMP_ATTR = 0x026C # eco mode temperature (decidegree)\nMOES_VALVE_STATE_ATTR = 0x026D # opening percentage\nMOES_BATTERY_LOW_ATTR = 0x016E # battery low warning\nMOES_WEEK_FORMAT_ATTR = 0x046F # [0] 5 days [1] 6 days, [2] 7 days\nMOES_AWAY_TEMP_ATTR = 0x0272 # away mode temperature (decidegree)\nMOES_AUTO_LOCK_ATTR = 0x0174 # [0] auto [1] manual\nMOES_AWAY_DAYS_ATTR = 0x0275 # away mode duration (days)\n\n# schedule [6,0,20,8,0,15,11,30,15,12,30,15,17,30,20,22,0,15]\n# 6:00 - 20*, 8:00 - 15*, 11:30 - 15*, 12:30 - 15*, 17:30 - 20*, 22:00 - 15*\n# Top bits in hours have special meaning\n# 8: ??\n# 7: Current schedule indicator\nMOES_SCHEDULE_WORKDAY_ATTR = 0x0070\nMOES_SCHEDULE_WEEKEND_ATTR = 0x0071\n\n\nclass data144(t.FixedList, item_type=t.uint8_t, length=18):\n \"\"\"General data, Discrete, 144 bit.\"\"\"\n\n pass\n\n\nclass MoesManufCluster(TuyaManufClusterAttributes):\n \"\"\"Manufacturer Specific Cluster of some thermostatic valves.\"\"\"\n\n 
set_time_offset = 1970\n\n manufacturer_attributes = {\n MOES_CHILD_LOCK_ATTR: (\"child_lock\", t.uint8_t),\n MOES_WINDOW_DETECT_ATTR: (\"window_detection\", t.data24),\n MOES_VALVE_DETECT_ATTR: (\"valve_detect\", t.uint8_t),\n MOES_VALVE_STATE_ATTR: (\"valve_state\", t.uint32_t),\n MOES_TARGET_TEMP_ATTR: (\"target_temperature\", t.uint32_t),\n MOES_TEMPERATURE_ATTR: (\"temperature\", t.uint32_t),\n MOES_MODE_ATTR: (\"mode\", t.uint8_t),\n MOES_TEMP_CALIBRATION_ATTR: (\"temperature_calibration\", t.int32s),\n MOES_MIN_TEMPERATURE_ATTR: (\"min_temperature\", t.uint32_t),\n MOES_MAX_TEMPERATURE_ATTR: (\"max_temperature\", t.uint32_t),\n MOES_BOOST_TIME_ATTR: (\"boost_duration_seconds\", t.uint32_t),\n MOES_FORCE_VALVE_ATTR: (\"valve_force_state\", t.uint8_t),\n MOES_COMFORT_TEMP_ATTR: (\"comfort_mode_temperature\", t.uint32_t),\n MOES_ECO_TEMP_ATTR: (\"eco_mode_temperature\", t.uint32_t),\n MOES_BATTERY_LOW_ATTR: (\"battery_low\", t.uint8_t),\n MOES_WEEK_FORMAT_ATTR: (\"week_format\", t.uint8_t),\n MOES_AWAY_TEMP_ATTR: (\"away_mode_temperature\", t.uint32_t),\n MOES_AUTO_LOCK_ATTR: (\"auto_lock\", t.uint8_t),\n MOES_AWAY_DAYS_ATTR: (\"away_duration_days\", t.uint32_t),\n MOES_SCHEDULE_WORKDAY_ATTR: (\"workday_schedule\", data144),\n MOES_SCHEDULE_WEEKEND_ATTR: (\"weekend_schedule\", data144),\n }\n\n DIRECT_MAPPED_ATTRS = {\n MOES_TEMPERATURE_ATTR: (\"local_temp\", lambda value: value * 10),\n MOES_TARGET_TEMP_ATTR: (\"occupied_heating_setpoint\", lambda value: value * 10),\n MOES_AWAY_TEMP_ATTR: (\"unoccupied_heating_setpoint\", lambda value: value * 100),\n MOES_COMFORT_TEMP_ATTR: (\"comfort_heating_setpoint\", lambda value: value * 100),\n MOES_ECO_TEMP_ATTR: (\"eco_heating_setpoint\", lambda value: value * 100),\n MOES_TEMP_CALIBRATION_ATTR: (\n \"local_temperature_calibration\",\n lambda value: value * 10,\n ),\n MOES_MIN_TEMPERATURE_ATTR: (\n \"min_heat_setpoint_limit\",\n lambda value: value * 100,\n ),\n MOES_MAX_TEMPERATURE_ATTR: (\n \"max_heat_setpoint_limit\",\n lambda value: value * 100,\n ),\n MOES_VALVE_STATE_ATTR: (\"valve_open_percentage\", None),\n MOES_AWAY_DAYS_ATTR: (\"unoccupied_duration_days\", None),\n MOES_BOOST_TIME_ATTR: (\"boost_duration_seconds\", None),\n MOES_MODE_ATTR: (\"operation_preset\", None),\n MOES_WEEK_FORMAT_ATTR: (\"work_days\", None),\n MOES_FORCE_VALVE_ATTR: (\"valve_force_state\", None),\n }\n\n def _update_attribute(self, attrid, value):\n super()._update_attribute(attrid, value)\n if attrid in self.DIRECT_MAPPED_ATTRS:\n self.endpoint.device.thermostat_bus.listener_event(\n \"temperature_change\",\n self.DIRECT_MAPPED_ATTRS[attrid][0],\n value\n if self.DIRECT_MAPPED_ATTRS[attrid][1] is None\n else self.DIRECT_MAPPED_ATTRS[attrid][1](\n value\n ), # decidegree to centidegree\n )\n elif attrid in (MOES_SCHEDULE_WORKDAY_ATTR, MOES_SCHEDULE_WEEKEND_ATTR):\n self.endpoint.device.thermostat_bus.listener_event(\n \"schedule_change\", attrid, value\n )\n\n if attrid == MOES_WINDOW_DETECT_ATTR:\n self.endpoint.device.window_detection_bus.listener_event(\n \"window_detect_change\", value\n )\n elif attrid == MOES_MODE_ATTR:\n self.endpoint.device.thermostat_bus.listener_event(\"mode_change\", value)\n elif attrid == MOES_VALVE_STATE_ATTR:\n self.endpoint.device.thermostat_bus.listener_event(\"state_change\", value)\n elif attrid == MOES_CHILD_LOCK_ATTR:\n mode = 1 if value else 0\n self.endpoint.device.ui_bus.listener_event(\"child_lock_change\", mode)\n elif attrid == MOES_AUTO_LOCK_ATTR:\n mode = 1 if value else 0\n 
self.endpoint.device.ui_bus.listener_event(\"autolock_change\", mode)\n elif attrid == MOES_BATTERY_LOW_ATTR:\n self.endpoint.device.battery_bus.listener_event(\n \"battery_change\", 5 if value else 100\n )\n\n\nclass MoesThermostat(TuyaThermostatCluster):\n \"\"\"Thermostat cluster for some thermostatic valves.\"\"\"\n\n class Preset(t.enum8):\n \"\"\"Working modes of the thermostat.\"\"\"\n\n Away = 0x00\n Schedule = 0x01\n Manual = 0x02\n Comfort = 0x03\n Eco = 0x04\n Boost = 0x05\n Complex = 0x06\n\n class WorkDays(t.enum8):\n \"\"\"Workday configuration for scheduler operation mode.\"\"\"\n\n MonToFri = 0x00\n MonToSat = 0x01\n MonToSun = 0x02\n\n class ForceValveState(t.enum8):\n \"\"\"Force valve state option.\"\"\"\n\n Normal = 0x00\n Open = 0x01\n Close = 0x02\n\n _CONSTANT_ATTRIBUTES = {\n 0x001B: Thermostat.ControlSequenceOfOperation.Heating_Only,\n 0x001C: Thermostat.SystemMode.Heat,\n }\n\n manufacturer_attributes = {\n 0x4000: (\"comfort_heating_setpoint\", t.int16s),\n 0x4001: (\"eco_heating_setpoint\", t.int16s),\n 0x4002: (\"operation_preset\", Preset),\n 0x4003: (\"work_days\", WorkDays),\n 0x4004: (\"valve_open_percentage\", t.uint8_t),\n 0x4005: (\"boost_duration_seconds\", t.uint32_t),\n 0x4006: (\"valve_force_state\", ForceValveState),\n 0x4007: (\"unoccupied_duration_days\", t.uint32_t),\n 0x4110: (\"workday_schedule_1_hour\", t.uint8_t),\n 0x4111: (\"workday_schedule_1_minute\", t.uint8_t),\n 0x4112: (\"workday_schedule_1_temperature\", t.int16s),\n 0x4120: (\"workday_schedule_2_hour\", t.uint8_t),\n 0x4121: (\"workday_schedule_2_minute\", t.uint8_t),\n 0x4122: (\"workday_schedule_2_temperature\", t.int16s),\n 0x4130: (\"workday_schedule_3_hour\", t.uint8_t),\n 0x4131: (\"workday_schedule_3_minute\", t.uint8_t),\n 0x4132: (\"workday_schedule_3_temperature\", t.int16s),\n 0x4140: (\"workday_schedule_4_hour\", t.uint8_t),\n 0x4141: (\"workday_schedule_4_minute\", t.uint8_t),\n 0x4142: (\"workday_schedule_4_temperature\", t.int16s),\n 0x4150: (\"workday_schedule_5_hour\", t.uint8_t),\n 0x4151: (\"workday_schedule_5_minute\", t.uint8_t),\n 0x4152: (\"workday_schedule_5_temperature\", t.int16s),\n 0x4160: (\"workday_schedule_6_hour\", t.uint8_t),\n 0x4161: (\"workday_schedule_6_minute\", t.uint8_t),\n 0x4162: (\"workday_schedule_6_temperature\", t.int16s),\n 0x4210: (\"weekend_schedule_1_hour\", t.uint8_t),\n 0x4211: (\"weekend_schedule_1_minute\", t.uint8_t),\n 0x4212: (\"weekend_schedule_1_temperature\", t.int16s),\n 0x4220: (\"weekend_schedule_2_hour\", t.uint8_t),\n 0x4221: (\"weekend_schedule_2_minute\", t.uint8_t),\n 0x4222: (\"weekend_schedule_2_temperature\", t.int16s),\n 0x4230: (\"weekend_schedule_3_hour\", t.uint8_t),\n 0x4231: (\"weekend_schedule_3_minute\", t.uint8_t),\n 0x4232: (\"weekend_schedule_3_temperature\", t.int16s),\n 0x4240: (\"weekend_schedule_4_hour\", t.uint8_t),\n 0x4241: (\"weekend_schedule_4_minute\", t.uint8_t),\n 0x4242: (\"weekend_schedule_4_temperature\", t.int16s),\n 0x4250: (\"weekend_schedule_5_hour\", t.uint8_t),\n 0x4251: (\"weekend_schedule_5_minute\", t.uint8_t),\n 0x4252: (\"weekend_schedule_5_temperature\", t.int16s),\n 0x4260: (\"weekend_schedule_6_hour\", t.uint8_t),\n 0x4261: (\"weekend_schedule_6_minute\", t.uint8_t),\n 0x4262: (\"weekend_schedule_6_temperature\", t.int16s),\n }\n\n DIRECT_MAPPING_ATTRS = {\n \"occupied_heating_setpoint\": (\n MOES_TARGET_TEMP_ATTR,\n lambda value: round(value / 10),\n ),\n \"unoccupied_heating_setpoint\": (\n MOES_AWAY_TEMP_ATTR,\n lambda value: round(value / 100),\n ),\n 
\"comfort_heating_setpoint\": (\n MOES_COMFORT_TEMP_ATTR,\n lambda value: round(value / 100),\n ),\n \"eco_heating_setpoint\": (MOES_ECO_TEMP_ATTR, lambda value: round(value / 100)),\n \"min_heat_setpoint_limit\": (\n MOES_MIN_TEMPERATURE_ATTR,\n lambda value: round(value / 100),\n ),\n \"max_heat_setpoint_limit\": (\n MOES_MAX_TEMPERATURE_ATTR,\n lambda value: round(value / 100),\n ),\n \"local_temperature_calibration\": (\n MOES_TEMP_CALIBRATION_ATTR,\n lambda value: round(value / 10),\n ),\n \"work_days\": (MOES_WEEK_FORMAT_ATTR, None),\n \"operation_preset\": (MOES_MODE_ATTR, None),\n \"boost_duration_seconds\": (MOES_BOOST_TIME_ATTR, None),\n \"valve_force_state\": (MOES_FORCE_VALVE_ATTR, None),\n \"unoccupied_duration_days\": (MOES_AWAY_DAYS_ATTR, None),\n }\n\n WORKDAY_SCHEDULE_ATTRS = {\n \"workday_schedule_6_temperature\": 1500,\n \"workday_schedule_6_minute\": 0,\n \"workday_schedule_6_hour\": 22,\n \"workday_schedule_5_temperature\": 2000,\n \"workday_schedule_5_minute\": 30,\n \"workday_schedule_5_hour\": 17,\n \"workday_schedule_4_temperature\": 1500,\n \"workday_schedule_4_minute\": 30,\n \"workday_schedule_4_hour\": 12,\n \"workday_schedule_3_temperature\": 1500,\n \"workday_schedule_3_minute\": 30,\n \"workday_schedule_3_hour\": 11,\n \"workday_schedule_2_temperature\": 1500,\n \"workday_schedule_2_minute\": 0,\n \"workday_schedule_2_hour\": 8,\n \"workday_schedule_1_temperature\": 2000,\n \"workday_schedule_1_minute\": 0,\n \"workday_schedule_1_hour\": 6,\n }\n\n WEEKEND_SCHEDULE_ATTRS = {\n \"weekend_schedule_6_temperature\": 1500,\n \"weekend_schedule_6_minute\": 0,\n \"weekend_schedule_6_hour\": 22,\n \"weekend_schedule_5_temperature\": 2000,\n \"weekend_schedule_5_minute\": 30,\n \"weekend_schedule_5_hour\": 17,\n \"weekend_schedule_4_temperature\": 1500,\n \"weekend_schedule_4_minute\": 30,\n \"weekend_schedule_4_hour\": 12,\n \"weekend_schedule_3_temperature\": 1500,\n \"weekend_schedule_3_minute\": 30,\n \"weekend_schedule_3_hour\": 11,\n \"weekend_schedule_2_temperature\": 1500,\n \"weekend_schedule_2_minute\": 0,\n \"weekend_schedule_2_hour\": 8,\n \"weekend_schedule_1_temperature\": 2000,\n \"weekend_schedule_1_minute\": 0,\n \"weekend_schedule_1_hour\": 6,\n }\n\n def map_attribute(self, attribute, value):\n \"\"\"Map standardized attribute value to dict of manufacturer values.\"\"\"\n\n if attribute in self.DIRECT_MAPPING_ATTRS:\n return {\n self.DIRECT_MAPPING_ATTRS[attribute][0]: value\n if self.DIRECT_MAPPING_ATTRS[attribute][1] is None\n else self.DIRECT_MAPPING_ATTRS[attribute][1](value)\n }\n if attribute in (\"programing_oper_mode\", \"occupancy\"):\n if attribute == \"occupancy\":\n occupancy = value\n oper_mode = self._attr_cache.get(\n self.attridx[\"programing_oper_mode\"],\n self.ProgrammingOperationMode.Simple,\n )\n else:\n occupancy = self._attr_cache.get(\n self.attridx[\"occupancy\"], self.Occupancy.Occupied\n )\n oper_mode = value\n if occupancy == self.Occupancy.Unoccupied:\n return {MOES_MODE_ATTR: 0}\n if occupancy == self.Occupancy.Occupied:\n if oper_mode == self.ProgrammingOperationMode.Schedule_programming_mode:\n return {MOES_MODE_ATTR: 1}\n if oper_mode == self.ProgrammingOperationMode.Simple:\n return {MOES_MODE_ATTR: 2}\n if oper_mode == self.ProgrammingOperationMode.Economy_mode:\n return {MOES_MODE_ATTR: 4}\n self.error(\"Unsupported value for ProgrammingOperationMode\")\n else:\n self.error(\"Unsupported value for Occupancy\")\n if attribute == \"system_mode\":\n return {\n MOES_MODE_ATTR: self._attr_cache.get(\n 
self.attridx[\"operation_preset\"], 2\n )\n }\n if attribute in self.WORKDAY_SCHEDULE_ATTRS:\n data = data144()\n for num, (attr, default) in enumerate(self.WORKDAY_SCHEDULE_ATTRS.items()):\n\n if num % 3 == 0:\n if attr == attribute:\n val = round(value / 100)\n else:\n val = round(\n self._attr_cache.get(self.attridx[attr], default) / 100\n )\n else:\n if attr == attribute:\n val = value\n else:\n val = self._attr_cache.get(self.attridx[attr], default)\n\n data.append(val)\n return {MOES_SCHEDULE_WORKDAY_ATTR: data}\n if attribute in self.WEEKEND_SCHEDULE_ATTRS:\n data = data144()\n for num, (attr, default) in enumerate(self.WEEKEND_SCHEDULE_ATTRS.items()):\n\n if num % 3 == 0:\n if attr == attribute:\n val = round(value / 100)\n else:\n val = round(\n self._attr_cache.get(self.attridx[attr], default) / 100\n )\n else:\n if attr == attribute:\n val = value\n else:\n val = self._attr_cache.get(self.attridx[attr], default)\n\n data.append(val)\n return {MOES_SCHEDULE_WEEKEND_ATTR: data}\n\n def mode_change(self, value):\n \"\"\"System Mode change.\"\"\"\n if value == 0:\n prog_mode = self.ProgrammingOperationMode.Simple\n occupancy = self.Occupancy.Unoccupied\n elif value == 1:\n prog_mode = self.ProgrammingOperationMode.Schedule_programming_mode\n occupancy = self.Occupancy.Occupied\n elif value == 2:\n prog_mode = self.ProgrammingOperationMode.Simple\n occupancy = self.Occupancy.Occupied\n elif value == 3:\n prog_mode = self.ProgrammingOperationMode.Simple\n occupancy = self.Occupancy.Occupied\n elif value == 4:\n prog_mode = self.ProgrammingOperationMode.Economy_mode\n occupancy = self.Occupancy.Occupied\n elif value == 5:\n prog_mode = self.ProgrammingOperationMode.Simple\n occupancy = self.Occupancy.Occupied\n else:\n prog_mode = self.ProgrammingOperationMode.Simple\n occupancy = self.Occupancy.Occupied\n\n self._update_attribute(self.attridx[\"programing_oper_mode\"], prog_mode)\n self._update_attribute(self.attridx[\"occupancy\"], occupancy)\n\n def schedule_change(self, attr, value):\n \"\"\"Scheduler attribute change.\"\"\"\n\n if attr == MOES_SCHEDULE_WORKDAY_ATTR:\n self._update_attribute(\n self.attridx[\"workday_schedule_1_hour\"], value[17] & 0x3F\n )\n self._update_attribute(self.attridx[\"workday_schedule_1_minute\"], value[16])\n self._update_attribute(\n self.attridx[\"workday_schedule_1_temperature\"], value[15] * 100\n )\n self._update_attribute(\n self.attridx[\"workday_schedule_2_hour\"], value[14] & 0x3F\n )\n self._update_attribute(self.attridx[\"workday_schedule_2_minute\"], value[13])\n self._update_attribute(\n self.attridx[\"workday_schedule_2_temperature\"], value[12] * 100\n )\n self._update_attribute(\n self.attridx[\"workday_schedule_3_hour\"], value[11] & 0x3F\n )\n self._update_attribute(self.attridx[\"workday_schedule_3_minute\"], value[10])\n self._update_attribute(\n self.attridx[\"workday_schedule_3_temperature\"], value[9] * 100\n )\n self._update_attribute(\n self.attridx[\"workday_schedule_4_hour\"], value[8] & 0x3F\n )\n self._update_attribute(self.attridx[\"workday_schedule_4_minute\"], value[7])\n self._update_attribute(\n self.attridx[\"workday_schedule_4_temperature\"], value[6] * 100\n )\n self._update_attribute(\n self.attridx[\"workday_schedule_5_hour\"], value[5] & 0x3F\n )\n self._update_attribute(self.attridx[\"workday_schedule_5_minute\"], value[4])\n self._update_attribute(\n self.attridx[\"workday_schedule_5_temperature\"], value[3] * 100\n )\n self._update_attribute(\n self.attridx[\"workday_schedule_6_hour\"], value[2] & 0x3F\n )\n 
self._update_attribute(self.attridx[\"workday_schedule_6_minute\"], value[1])\n self._update_attribute(\n self.attridx[\"workday_schedule_6_temperature\"], value[0] * 100\n )\n elif attr == MOES_SCHEDULE_WEEKEND_ATTR:\n self._update_attribute(\n self.attridx[\"weekend_schedule_1_hour\"], value[17] & 0x3F\n )\n self._update_attribute(self.attridx[\"weekend_schedule_1_minute\"], value[16])\n self._update_attribute(\n self.attridx[\"weekend_schedule_1_temperature\"], value[15] * 100\n )\n self._update_attribute(\n self.attridx[\"weekend_schedule_2_hour\"], value[14] & 0x3F\n )\n self._update_attribute(self.attridx[\"weekend_schedule_2_minute\"], value[13])\n self._update_attribute(\n self.attridx[\"weekend_schedule_2_temperature\"], value[12] * 100\n )\n self._update_attribute(\n self.attridx[\"weekend_schedule_3_hour\"], value[11] & 0x3F\n )\n self._update_attribute(self.attridx[\"weekend_schedule_3_minute\"], value[10])\n self._update_attribute(\n self.attridx[\"weekend_schedule_3_temperature\"], value[9] * 100\n )\n self._update_attribute(\n self.attridx[\"weekend_schedule_4_hour\"], value[8] & 0x3F\n )\n self._update_attribute(self.attridx[\"weekend_schedule_4_minute\"], value[7])\n self._update_attribute(\n self.attridx[\"weekend_schedule_4_temperature\"], value[6] * 100\n )\n self._update_attribute(\n self.attridx[\"weekend_schedule_5_hour\"], value[5] & 0x3F\n )\n self._update_attribute(self.attridx[\"weekend_schedule_5_minute\"], value[4])\n self._update_attribute(\n self.attridx[\"weekend_schedule_5_temperature\"], value[3] * 100\n )\n self._update_attribute(\n self.attridx[\"weekend_schedule_6_hour\"], value[2] & 0x3F\n )\n self._update_attribute(self.attridx[\"weekend_schedule_6_minute\"], value[1])\n self._update_attribute(\n self.attridx[\"weekend_schedule_6_temperature\"], value[0] * 100\n )\n\n\nclass MoesUserInterface(TuyaUserInterfaceCluster):\n \"\"\"HVAC User interface cluster for tuya electric heating thermostats.\"\"\"\n\n _CHILD_LOCK_ATTR = MOES_CHILD_LOCK_ATTR\n\n manufacturer_attributes = {\n 0x5000: (\"auto_lock\", t.Bool),\n }\n\n def autolock_change(self, value):\n \"\"\"Automatic lock change.\"\"\"\n\n self._update_attribute(self.attridx[\"auto_lock\"], value)\n\n def map_attribute(self, attribute, value):\n \"\"\"Map standardized attribute value to dict of manufacturer values.\"\"\"\n\n if attribute == \"auto_lock\":\n return {MOES_AUTO_LOCK_ATTR: value}\n\n\nclass MoesWindowDetection(LocalDataCluster, OnOff):\n \"\"\"On/Off cluster for the window detection function of the electric heating thermostats.\"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"Init.\"\"\"\n super().__init__(*args, **kwargs)\n self.endpoint.device.window_detection_bus.add_listener(self)\n\n manufacturer_attributes = {\n 0x6000: (\"window_detection_temperature\", t.int16s),\n 0x6001: (\"window_detection_timeout_minutes\", t.uint8_t),\n }\n\n def window_detect_change(self, value):\n \"\"\"Window detection change.\"\"\"\n\n self._update_attribute(\n self.attridx[\"window_detection_timeout_minutes\"], value[0]\n )\n self._update_attribute(\n self.attridx[\"window_detection_temperature\"], value[1] * 100\n )\n self._update_attribute(self.attridx[\"on_off\"], value[2])\n\n async def write_attributes(self, attributes, manufacturer=None):\n \"\"\"Defer attributes writing to the set_data tuya command.\"\"\"\n\n records = self._write_attr_records(attributes)\n\n if not records:\n return [[foundation.WriteAttributesStatusRecord(foundation.Status.SUCCESS)]]\n\n has_change = False\n data = 
t.data24()\n data.append(\n self._attr_cache.get(\n self.attridx[\"window_detection_timeout_minutes\"],\n 5,\n )\n )\n data.append(\n round(\n self._attr_cache.get(\n self.attridx[\"window_detection_temperature\"],\n 50,\n )\n / 100\n )\n )\n data.append(\n self._attr_cache.get(\n self.attridx[\"on_off\"],\n False,\n )\n )\n\n for record in records:\n attr_name = self.attributes[record.attrid][0]\n if attr_name == \"on_off\":\n data[2] = record.value.value\n has_change = True\n elif attr_name == \"window_detection_temperature\":\n data[1] = record.value.value / 100\n has_change = True\n elif attr_name == \"window_detection_timeout_minutes\":\n data[0] = record.value.value\n has_change = True\n\n if has_change:\n return await self.endpoint.tuya_manufacturer.write_attributes(\n {MOES_WINDOW_DETECT_ATTR: data}, manufacturer=manufacturer\n )\n\n return [\n [\n foundation.WriteAttributesStatusRecord(\n foundation.Status.FAILURE, r.attrid\n )\n for r in records\n ]\n ]\n\n async def command(\n self,\n command_id: Union[foundation.Command, int, t.uint8_t],\n *args,\n manufacturer: Optional[Union[int, t.uint16_t]] = None,\n expect_reply: bool = True,\n tsn: Optional[Union[int, t.uint8_t]] = None,\n ):\n \"\"\"Override the default Cluster command.\"\"\"\n\n if command_id in (0x0000, 0x0001, 0x0002):\n\n if command_id == 0x0000:\n value = False\n elif command_id == 0x0001:\n value = True\n else:\n attrid = self.attridx[\"on_off\"]\n success, _ = await self.read_attributes(\n (attrid,), manufacturer=manufacturer\n )\n try:\n value = success[attrid]\n except KeyError:\n return foundation.Status.FAILURE\n value = not value\n\n (res,) = await self.write_attributes(\n {\"on_off\": value},\n manufacturer=manufacturer,\n )\n return [command_id, res[0].status]\n\n return [command_id, foundation.Status.UNSUP_CLUSTER_COMMAND]\n\n\nZONNSMART_CHILD_LOCK_ATTR = 0x0128 # [0] unlocked [1] child-locked\nZONNSMART_WINDOW_DETECT_ATTR = 0x0108 # [0] inactive [1] active\nZONNSMART_TARGET_TEMP_ATTR = 0x0210 # [0,0,0,210] target room temp (decidegree)\nZONNSMART_TEMPERATURE_ATTR = 0x0218 # [0,0,0,200] current room temp (decidegree)\nZONNSMART_BATTERY_ATTR = 0x0223 # [0,0,0,98] battery charge\nZONNSMART_MODE_ATTR = (\n 0x0402 # [0] Scheduled/auto [1] manual [2] Holiday [3] HolidayReady\n)\nZONNSMART_HEATING_STOPPING = 0x016B # [0] inactive [1] active\nZONNSMART_BOOST_TIME_ATTR = 0x0265 # BOOST mode operating time in (sec)\nZONNSMART_UPTIME_TIME_ATTR = (\n 0x0024 # Seems to be the uptime attribute (sent hourly, increases) [0,200]\n)\n\n\nclass ZONNSMARTManufCluster(TuyaManufClusterAttributes):\n \"\"\"Manufacturer Specific Cluster of some thermostatic valves.\"\"\"\n\n manufacturer_attributes = {\n ZONNSMART_CHILD_LOCK_ATTR: (\"child_lock\", t.uint8_t),\n ZONNSMART_WINDOW_DETECT_ATTR: (\"window_detection\", t.uint8_t),\n ZONNSMART_TARGET_TEMP_ATTR: (\"target_temperature\", t.uint32_t),\n ZONNSMART_TEMPERATURE_ATTR: (\"temperature\", t.uint32_t),\n ZONNSMART_BATTERY_ATTR: (\"battery\", t.uint32_t),\n ZONNSMART_MODE_ATTR: (\"mode\", t.uint8_t),\n ZONNSMART_BOOST_TIME_ATTR: (\"boost_duration_seconds\", t.uint32_t),\n ZONNSMART_UPTIME_TIME_ATTR: (\"uptime\", t.uint32_t),\n ZONNSMART_HEATING_STOPPING: (\"heating_stop\", t.uint8_t),\n }\n\n DIRECT_MAPPED_ATTRS = {\n ZONNSMART_TEMPERATURE_ATTR: (\"local_temp\", lambda value: value * 10),\n ZONNSMART_TARGET_TEMP_ATTR: (\n \"occupied_heating_setpoint\",\n lambda value: value * 10,\n ),\n ZONNSMART_BOOST_TIME_ATTR: (\"boost_duration_seconds\", None),\n ZONNSMART_UPTIME_TIME_ATTR: 
(\"uptime_duration_hours\", None),\n }\n\n def _update_attribute(self, attrid, value):\n super()._update_attribute(attrid, value)\n if attrid in self.DIRECT_MAPPED_ATTRS:\n self.endpoint.device.thermostat_bus.listener_event(\n \"temperature_change\",\n self.DIRECT_MAPPED_ATTRS[attrid][0],\n value\n if self.DIRECT_MAPPED_ATTRS[attrid][1] is None\n else self.DIRECT_MAPPED_ATTRS[attrid][1](\n value\n ), # decidegree to centidegree\n )\n elif attrid == ZONNSMART_MODE_ATTR:\n self.endpoint.device.thermostat_bus.listener_event(\"mode_change\", value)\n elif attrid == ZONNSMART_HEATING_STOPPING:\n self.endpoint.device.thermostat_bus.listener_event(\n \"state_change\", value == 0\n )\n elif attrid == ZONNSMART_CHILD_LOCK_ATTR:\n mode = 1 if value else 0\n self.endpoint.device.ui_bus.listener_event(\"child_lock_change\", mode)\n elif attrid == ZONNSMART_BATTERY_ATTR:\n self.endpoint.device.battery_bus.listener_event(\"battery_change\", value)\n\n\nclass ZONNSMARTThermostat(TuyaThermostatCluster):\n \"\"\"Thermostat cluster for some thermostatic valves.\"\"\"\n\n DIRECT_MAPPING_ATTRS = {\n \"occupied_heating_setpoint\": (\n ZONNSMART_TARGET_TEMP_ATTR,\n lambda value: round(value / 10),\n ),\n \"operation_preset\": (ZONNSMART_MODE_ATTR, None),\n \"boost_duration_seconds\": (ZONNSMART_BOOST_TIME_ATTR, None),\n }\n\n def map_attribute(self, attribute, value):\n \"\"\"Map standardized attribute value to dict of manufacturer values.\"\"\"\n\n if attribute in self.DIRECT_MAPPING_ATTRS:\n return {\n self.DIRECT_MAPPING_ATTRS[attribute][0]: value\n if self.DIRECT_MAPPING_ATTRS[attribute][1] is None\n else self.DIRECT_MAPPING_ATTRS[attribute][1](value)\n }\n if attribute in (\"system_mode\", \"programing_oper_mode\"):\n if attribute == \"system_mode\":\n system_mode = value\n oper_mode = self._attr_cache.get(\n self.attridx[\"programing_oper_mode\"],\n self.ProgrammingOperationMode.Simple,\n )\n else:\n system_mode = self._attr_cache.get(\n self.attridx[\"system_mode\"], self.SystemMode.Heat\n )\n oper_mode = value\n if system_mode == self.SystemMode.Off:\n return {ZONNSMART_HEATING_STOPPING: 1}\n if system_mode == self.SystemMode.Heat:\n if oper_mode == self.ProgrammingOperationMode.Schedule_programming_mode:\n return {ZONNSMART_MODE_ATTR: 0}\n if oper_mode == self.ProgrammingOperationMode.Simple:\n return {ZONNSMART_MODE_ATTR: 1}\n self.error(\"Unsupported value for ProgrammingOperationMode\")\n else:\n self.error(\"Unsupported value for SystemMode\")\n\n def mode_change(self, value):\n \"\"\"System Mode change.\"\"\"\n if value == 0:\n prog_mode = self.ProgrammingOperationMode.Schedule_programming_mode\n elif value == 1:\n prog_mode = self.ProgrammingOperationMode.Simple\n else:\n prog_mode = self.ProgrammingOperationMode.Simple\n\n self._update_attribute(self.attridx[\"system_mode\"], self.SystemMode.Heat)\n self._update_attribute(self.attridx[\"programing_oper_mode\"], prog_mode)\n\n\nclass ZONNSMARTUserInterface(TuyaUserInterfaceCluster):\n \"\"\"HVAC User interface cluster for tuya electric heating thermostats.\"\"\"\n\n _CHILD_LOCK_ATTR = ZONNSMART_CHILD_LOCK_ATTR\n\n\nclass SiterwellGS361_Type1(TuyaThermostat):\n \"\"\"SiterwellGS361 Thermostatic radiator valve and clones.\"\"\"\n\n signature = {\n # endpoint=1 profile=260 device_type=0 device_version=0 input_clusters=[0, 3]\n # output_clusters=[3, 25]>\n MODELS_INFO: [\n (\"_TYST11_jeaxp72v\", \"eaxp72v\"),\n (\"_TYST11_kfvq6avy\", \"fvq6avy\"),\n (\"_TYST11_zivfvd7h\", \"ivfvd7h\"),\n (\"_TYST11_hhrtiq0x\", \"hrtiq0x\"),\n (\"_TYST11_ps5v5jor\", 
\"s5v5jor\"),\n (\"_TYST11_owwdxjbx\", \"wwdxjbx\"),\n (\"_TYST11_8daqwrsj\", \"daqwrsj\"),\n ],\n ENDPOINTS: {\n 1: {\n PROFILE_ID: zha.PROFILE_ID,\n DEVICE_TYPE: zha.DeviceType.ON_OFF_SWITCH,\n INPUT_CLUSTERS: [Basic.cluster_id, Identify.cluster_id],\n OUTPUT_CLUSTERS: [Identify.cluster_id, Ota.cluster_id],\n }\n },\n }\n\n replacement = {\n ENDPOINTS: {\n 1: {\n PROFILE_ID: zha.PROFILE_ID,\n DEVICE_TYPE: zha.DeviceType.THERMOSTAT,\n INPUT_CLUSTERS: [\n Basic.cluster_id,\n Identify.cluster_id,\n SiterwellManufCluster,\n SiterwellThermostat,\n SiterwellUserInterface,\n TuyaPowerConfigurationCluster,\n ],\n OUTPUT_CLUSTERS: [Identify.cluster_id, Ota.cluster_id],\n }\n }\n }\n\n\nclass SiterwellGS361_Type2(TuyaThermostat):\n \"\"\"SiterwellGS361 Thermostatic radiator valve and clones (2nd cluster signature).\"\"\"\n\n signature = {\n # endpoint=1 profile=260 device_type=81 device_version=0 input_clusters=[0, 4, 5, 61184]\n # output_clusters=[10, 25]>\n MODELS_INFO: [\n (\"_TZE200_jeaxp72v\", \"TS0601\"),\n (\"_TZE200_kfvq6avy\", \"TS0601\"),\n (\"_TZE200_zivfvd7h\", \"TS0601\"),\n (\"_TZE200_hhrtiq0x\", \"TS0601\"),\n (\"_TZE200_ps5v5jor\", \"TS0601\"),\n (\"_TZE200_owwdxjbx\", \"TS0601\"),\n (\"_TZE200_8daqwrsj\", \"TS0601\"),\n ],\n ENDPOINTS: {\n 1: {\n PROFILE_ID: zha.PROFILE_ID,\n DEVICE_TYPE: zha.DeviceType.SMART_PLUG,\n INPUT_CLUSTERS: [\n Basic.cluster_id,\n Groups.cluster_id,\n Scenes.cluster_id,\n TuyaManufClusterAttributes.cluster_id,\n ],\n OUTPUT_CLUSTERS: [Time.cluster_id, Ota.cluster_id],\n }\n },\n }\n\n replacement = {\n ENDPOINTS: {\n 1: {\n PROFILE_ID: zha.PROFILE_ID,\n DEVICE_TYPE: zha.DeviceType.THERMOSTAT,\n INPUT_CLUSTERS: [\n Basic.cluster_id,\n Groups.cluster_id,\n Scenes.cluster_id,\n SiterwellManufCluster,\n SiterwellThermostat,\n SiterwellUserInterface,\n TuyaPowerConfigurationCluster,\n ],\n OUTPUT_CLUSTERS: [Time.cluster_id, Ota.cluster_id],\n }\n }\n }\n\n\nclass MoesHY368_Type1(TuyaThermostat):\n \"\"\"MoesHY368 Thermostatic radiator valve.\"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"Init device.\"\"\"\n self.window_detection_bus = Bus()\n super().__init__(*args, **kwargs)\n\n signature = {\n # endpoint=1 profile=260 device_type=81 device_version=0 input_clusters=[0, 4, 5, 61184]\n # output_clusters=[10, 25]>\n MODELS_INFO: [\n (\"_TZE200_ckud7u2l\", \"TS0601\"),\n (\"_TZE200_ywdxldoj\", \"TS0601\"),\n (\"_TZE200_cwnjrr72\", \"TS0601\"),\n (\"_TZE200_b6wax7g0\", \"TS0601\"),\n ],\n ENDPOINTS: {\n 1: {\n PROFILE_ID: zha.PROFILE_ID,\n DEVICE_TYPE: zha.DeviceType.SMART_PLUG,\n INPUT_CLUSTERS: [\n Basic.cluster_id,\n Groups.cluster_id,\n Scenes.cluster_id,\n TuyaManufClusterAttributes.cluster_id,\n ],\n OUTPUT_CLUSTERS: [Time.cluster_id, Ota.cluster_id],\n }\n },\n }\n\n replacement = {\n ENDPOINTS: {\n 1: {\n PROFILE_ID: zha.PROFILE_ID,\n DEVICE_TYPE: zha.DeviceType.THERMOSTAT,\n INPUT_CLUSTERS: [\n Basic.cluster_id,\n Groups.cluster_id,\n Scenes.cluster_id,\n MoesManufCluster,\n MoesThermostat,\n MoesUserInterface,\n MoesWindowDetection,\n TuyaPowerConfigurationCluster,\n ],\n OUTPUT_CLUSTERS: [Time.cluster_id, Ota.cluster_id],\n }\n }\n }\n\n\nclass MoesHY368_Type2(TuyaThermostat):\n \"\"\"MoesHY368 Thermostatic radiator valve (2nd cluster signature).\"\"\"\n\n signature = {\n # endpoint=1 profile=260 device_type=0 device_version=0 input_clusters=[0, 3]\n # output_clusters=[3, 25]>\n MODELS_INFO: [\n (\"_TYST11_ckud7u2l\", \"kud7u2l\"),\n (\"_TYST11_ywdxldoj\", \"wdxldoj\"),\n (\"_TYST11_cwnjrr72\", \"wnjrr72\"),\n (\"_TYST11_b6wax7g0\", 
\"6wax7g0\"),\n ],\n ENDPOINTS: {\n 1: {\n PROFILE_ID: zha.PROFILE_ID,\n DEVICE_TYPE: zha.DeviceType.ON_OFF_SWITCH,\n INPUT_CLUSTERS: [Basic.cluster_id, Identify.cluster_id],\n OUTPUT_CLUSTERS: [Identify.cluster_id, Ota.cluster_id],\n }\n },\n }\n\n replacement = {\n ENDPOINTS: {\n 1: {\n PROFILE_ID: zha.PROFILE_ID,\n DEVICE_TYPE: zha.DeviceType.THERMOSTAT,\n INPUT_CLUSTERS: [\n Basic.cluster_id,\n Identify.cluster_id,\n MoesManufCluster,\n MoesThermostat,\n MoesUserInterface,\n MoesWindowDetection,\n TuyaPowerConfigurationCluster,\n ],\n OUTPUT_CLUSTERS: [Identify.cluster_id, Ota.cluster_id],\n }\n }\n }\n\n\nclass ZonnsmartTV01_ZG(TuyaThermostat):\n \"\"\"ZONNSMART TV01-ZG Thermostatic radiator valve.\"\"\"\n\n signature = {\n # endpoint=1 profile=260 device_type=81 device_version=0 input_clusters=[0, 4, 5, 61184]\n # output_clusters=[10, 25]>\n MODELS_INFO: [\n (\"_TZE200_e9ba97vf\", \"TS0601\"),\n ],\n ENDPOINTS: {\n 1: {\n PROFILE_ID: zha.PROFILE_ID,\n DEVICE_TYPE: zha.DeviceType.SMART_PLUG,\n INPUT_CLUSTERS: [\n Basic.cluster_id,\n Groups.cluster_id,\n Scenes.cluster_id,\n TuyaManufClusterAttributes.cluster_id,\n ],\n OUTPUT_CLUSTERS: [Time.cluster_id, Ota.cluster_id],\n }\n },\n }\n\n replacement = {\n ENDPOINTS: {\n 1: {\n PROFILE_ID: zha.PROFILE_ID,\n DEVICE_TYPE: zha.DeviceType.THERMOSTAT,\n INPUT_CLUSTERS: [\n Basic.cluster_id,\n Groups.cluster_id,\n Scenes.cluster_id,\n ZONNSMARTManufCluster,\n ZONNSMARTThermostat,\n ZONNSMARTUserInterface,\n TuyaPowerConfigurationCluster,\n ],\n OUTPUT_CLUSTERS: [Time.cluster_id, Ota.cluster_id],\n }\n }\n }\n",
"path": "zhaquirks/tuya/valve.py"
}
] | [
{
"content": "\"\"\"Map from manufacturer to standard clusters for thermostatic valves.\"\"\"\nimport logging\nfrom typing import Optional, Union\n\nfrom zigpy.profiles import zha\nimport zigpy.types as t\nfrom zigpy.zcl import foundation\nfrom zigpy.zcl.clusters.general import Basic, Groups, Identify, OnOff, Ota, Scenes, Time\nfrom zigpy.zcl.clusters.hvac import Thermostat\n\nfrom zhaquirks import Bus, LocalDataCluster\nfrom zhaquirks.const import (\n DEVICE_TYPE,\n ENDPOINTS,\n INPUT_CLUSTERS,\n MODELS_INFO,\n OUTPUT_CLUSTERS,\n PROFILE_ID,\n)\nfrom zhaquirks.tuya import (\n TuyaManufClusterAttributes,\n TuyaPowerConfigurationCluster,\n TuyaThermostat,\n TuyaThermostatCluster,\n TuyaUserInterfaceCluster,\n)\n\n# info from https://github.com/Koenkk/zigbee-herdsman-converters/blob/master/converters/common.js#L113\n# and https://github.com/Koenkk/zigbee-herdsman-converters/blob/master/converters/fromZigbee.js#L362\nSITERWELL_CHILD_LOCK_ATTR = 0x0107 # [0] unlocked [1] child-locked\nSITERWELL_WINDOW_DETECT_ATTR = 0x0112 # [0] inactive [1] active\nSITERWELL_VALVE_DETECT_ATTR = 0x0114 # [0] do not report [1] report\nSITERWELL_VALVE_STATE_ATTR = 0x026D # [0,0,0,55] opening percentage\nSITERWELL_TARGET_TEMP_ATTR = 0x0202 # [0,0,0,210] target room temp (decidegree)\nSITERWELL_TEMPERATURE_ATTR = 0x0203 # [0,0,0,200] current room temp (decidegree)\nSITERWELL_BATTERY_ATTR = 0x0215 # [0,0,0,98] battery charge\nSITERWELL_MODE_ATTR = 0x0404 # [0] off [1] scheduled [2] manual\n\n_LOGGER = logging.getLogger(__name__)\n\n\nclass SiterwellManufCluster(TuyaManufClusterAttributes):\n \"\"\"Manufacturer Specific Cluster of some thermostatic valves.\"\"\"\n\n manufacturer_attributes = {\n SITERWELL_CHILD_LOCK_ATTR: (\"child_lock\", t.uint8_t),\n SITERWELL_WINDOW_DETECT_ATTR: (\"window_detection\", t.uint8_t),\n SITERWELL_VALVE_DETECT_ATTR: (\"valve_detect\", t.uint8_t),\n SITERWELL_VALVE_STATE_ATTR: (\"valve_state\", t.uint32_t),\n SITERWELL_TARGET_TEMP_ATTR: (\"target_temperature\", t.uint32_t),\n SITERWELL_TEMPERATURE_ATTR: (\"temperature\", t.uint32_t),\n SITERWELL_BATTERY_ATTR: (\"battery\", t.uint32_t),\n SITERWELL_MODE_ATTR: (\"mode\", t.uint8_t),\n }\n\n TEMPERATURE_ATTRS = {\n SITERWELL_TEMPERATURE_ATTR: \"local_temp\",\n SITERWELL_TARGET_TEMP_ATTR: \"occupied_heating_setpoint\",\n }\n\n def _update_attribute(self, attrid, value):\n super()._update_attribute(attrid, value)\n if attrid in self.TEMPERATURE_ATTRS:\n self.endpoint.device.thermostat_bus.listener_event(\n \"temperature_change\",\n self.TEMPERATURE_ATTRS[attrid],\n value * 10, # decidegree to centidegree\n )\n elif attrid == SITERWELL_MODE_ATTR:\n self.endpoint.device.thermostat_bus.listener_event(\"mode_change\", value)\n self.endpoint.device.thermostat_bus.listener_event(\n \"state_change\", value > 0\n )\n elif attrid == SITERWELL_VALVE_STATE_ATTR:\n self.endpoint.device.thermostat_bus.listener_event(\"state_change\", value)\n elif attrid == SITERWELL_CHILD_LOCK_ATTR:\n mode = 1 if value else 0\n self.endpoint.device.ui_bus.listener_event(\"child_lock_change\", mode)\n elif attrid == SITERWELL_BATTERY_ATTR:\n self.endpoint.device.battery_bus.listener_event(\"battery_change\", value)\n\n\nclass SiterwellThermostat(TuyaThermostatCluster):\n \"\"\"Thermostat cluster for some thermostatic valves.\"\"\"\n\n def map_attribute(self, attribute, value):\n \"\"\"Map standardized attribute value to dict of manufacturer values.\"\"\"\n\n if attribute == \"occupied_heating_setpoint\":\n # centidegree to decidegree\n return {SITERWELL_TARGET_TEMP_ATTR: 
round(value / 10)}\n if attribute in (\"system_mode\", \"programing_oper_mode\"):\n if attribute == \"system_mode\":\n system_mode = value\n oper_mode = self._attr_cache.get(\n self.attridx[\"programing_oper_mode\"],\n self.ProgrammingOperationMode.Simple,\n )\n else:\n system_mode = self._attr_cache.get(\n self.attridx[\"system_mode\"], self.SystemMode.Heat\n )\n oper_mode = value\n if system_mode == self.SystemMode.Off:\n return {SITERWELL_MODE_ATTR: 0}\n if system_mode == self.SystemMode.Heat:\n if oper_mode == self.ProgrammingOperationMode.Schedule_programming_mode:\n return {SITERWELL_MODE_ATTR: 1}\n if oper_mode == self.ProgrammingOperationMode.Simple:\n return {SITERWELL_MODE_ATTR: 2}\n self.error(\"Unsupported value for ProgrammingOperationMode\")\n else:\n self.error(\"Unsupported value for SystemMode\")\n\n def mode_change(self, value):\n \"\"\"System Mode change.\"\"\"\n if value == 0:\n self._update_attribute(self.attridx[\"system_mode\"], self.SystemMode.Off)\n return\n\n if value == 1:\n mode = self.ProgrammingOperationMode.Schedule_programming_mode\n else:\n mode = self.ProgrammingOperationMode.Simple\n\n self._update_attribute(self.attridx[\"system_mode\"], self.SystemMode.Heat)\n self._update_attribute(self.attridx[\"programing_oper_mode\"], mode)\n\n\nclass SiterwellUserInterface(TuyaUserInterfaceCluster):\n \"\"\"HVAC User interface cluster for tuya electric heating thermostats.\"\"\"\n\n _CHILD_LOCK_ATTR = SITERWELL_CHILD_LOCK_ATTR\n\n\n# info from https://github.com/Koenkk/zigbee-herdsman-converters/blob/master/lib/tuya.js\n# and https://github.com/Koenkk/zigbee-herdsman-converters/blob/master/converters/fromZigbee.js#L2777\nMOES_TARGET_TEMP_ATTR = 0x0202 # target room temp (decidegree)\nMOES_TEMPERATURE_ATTR = 0x0203 # current room temp (decidegree)\nMOES_MODE_ATTR = 0x0404 # [0] away [1] scheduled [2] manual [3] comfort [4] eco [5] boost [6] complex\nMOES_CHILD_LOCK_ATTR = 0x0107 # [0] unlocked [1] child-locked\nMOES_VALVE_DETECT_ATTR = 0x0114 # [0] do not report [1] report\nMOES_TEMP_CALIBRATION_ATTR = 0x022C # temperature calibration (decidegree)\nMOES_MIN_TEMPERATURE_ATTR = 0x0266 # minimum limit of temperature setting (decidegree)\nMOES_MAX_TEMPERATURE_ATTR = 0x0267 # maximum limit of temperature setting (decidegree)\nMOES_WINDOW_DETECT_ATTR = 0x0068 # [0,35,5] on/off, temperature, operating time (min)\nMOES_BOOST_TIME_ATTR = 0x0269 # BOOST mode operating time in (sec)\nMOES_FORCE_VALVE_ATTR = 0x046A # [0] normal [1] open [2] close\nMOES_COMFORT_TEMP_ATTR = 0x026B # comfort mode temperaure (decidegree)\nMOES_ECO_TEMP_ATTR = 0x026C # eco mode temperature (decidegree)\nMOES_VALVE_STATE_ATTR = 0x026D # opening percentage\nMOES_BATTERY_LOW_ATTR = 0x016E # battery low warning\nMOES_WEEK_FORMAT_ATTR = 0x046F # [0] 5 days [1] 6 days, [2] 7 days\nMOES_AWAY_TEMP_ATTR = 0x0272 # away mode temperature (decidegree)\nMOES_AUTO_LOCK_ATTR = 0x0174 # [0] auto [1] manual\nMOES_AWAY_DAYS_ATTR = 0x0275 # away mode duration (days)\n\n# schedule [6,0,20,8,0,15,11,30,15,12,30,15,17,30,20,22,0,15]\n# 6:00 - 20*, 8:00 - 15*, 11:30 - 15*, 12:30 - 15*, 17:30 - 20*, 22:00 - 15*\n# Top bits in hours have special meaning\n# 8: ??\n# 7: Current schedule indicator\nMOES_SCHEDULE_WORKDAY_ATTR = 0x0070\nMOES_SCHEDULE_WEEKEND_ATTR = 0x0071\n\n\nclass data144(t.FixedList, item_type=t.uint8_t, length=18):\n \"\"\"General data, Discrete, 144 bit.\"\"\"\n\n pass\n\n\nclass MoesManufCluster(TuyaManufClusterAttributes):\n \"\"\"Manufacturer Specific Cluster of some thermostatic valves.\"\"\"\n\n 
set_time_offset = 1970\n\n manufacturer_attributes = {\n MOES_CHILD_LOCK_ATTR: (\"child_lock\", t.uint8_t),\n MOES_WINDOW_DETECT_ATTR: (\"window_detection\", t.data24),\n MOES_VALVE_DETECT_ATTR: (\"valve_detect\", t.uint8_t),\n MOES_VALVE_STATE_ATTR: (\"valve_state\", t.uint32_t),\n MOES_TARGET_TEMP_ATTR: (\"target_temperature\", t.uint32_t),\n MOES_TEMPERATURE_ATTR: (\"temperature\", t.uint32_t),\n MOES_MODE_ATTR: (\"mode\", t.uint8_t),\n MOES_TEMP_CALIBRATION_ATTR: (\"temperature_calibration\", t.int32s),\n MOES_MIN_TEMPERATURE_ATTR: (\"min_temperature\", t.uint32_t),\n MOES_MAX_TEMPERATURE_ATTR: (\"max_temperature\", t.uint32_t),\n MOES_BOOST_TIME_ATTR: (\"boost_duration_seconds\", t.uint32_t),\n MOES_FORCE_VALVE_ATTR: (\"valve_force_state\", t.uint8_t),\n MOES_COMFORT_TEMP_ATTR: (\"comfort_mode_temperature\", t.uint32_t),\n MOES_ECO_TEMP_ATTR: (\"eco_mode_temperature\", t.uint32_t),\n MOES_BATTERY_LOW_ATTR: (\"battery_low\", t.uint8_t),\n MOES_WEEK_FORMAT_ATTR: (\"week_format\", t.uint8_t),\n MOES_AWAY_TEMP_ATTR: (\"away_mode_temperature\", t.uint32_t),\n MOES_AUTO_LOCK_ATTR: (\"auto_lock\", t.uint8_t),\n MOES_AWAY_DAYS_ATTR: (\"away_duration_days\", t.uint32_t),\n MOES_SCHEDULE_WORKDAY_ATTR: (\"workday_schedule\", data144),\n MOES_SCHEDULE_WEEKEND_ATTR: (\"weekend_schedule\", data144),\n }\n\n DIRECT_MAPPED_ATTRS = {\n MOES_TEMPERATURE_ATTR: (\"local_temp\", lambda value: value * 10),\n MOES_TARGET_TEMP_ATTR: (\"occupied_heating_setpoint\", lambda value: value * 10),\n MOES_AWAY_TEMP_ATTR: (\"unoccupied_heating_setpoint\", lambda value: value * 100),\n MOES_COMFORT_TEMP_ATTR: (\"comfort_heating_setpoint\", lambda value: value * 100),\n MOES_ECO_TEMP_ATTR: (\"eco_heating_setpoint\", lambda value: value * 100),\n MOES_TEMP_CALIBRATION_ATTR: (\n \"local_temperature_calibration\",\n lambda value: value * 10,\n ),\n MOES_MIN_TEMPERATURE_ATTR: (\n \"min_heat_setpoint_limit\",\n lambda value: value * 100,\n ),\n MOES_MAX_TEMPERATURE_ATTR: (\n \"max_heat_setpoint_limit\",\n lambda value: value * 100,\n ),\n MOES_VALVE_STATE_ATTR: (\"valve_open_percentage\", None),\n MOES_AWAY_DAYS_ATTR: (\"unoccupied_duration_days\", None),\n MOES_BOOST_TIME_ATTR: (\"boost_duration_seconds\", None),\n MOES_MODE_ATTR: (\"operation_preset\", None),\n MOES_WEEK_FORMAT_ATTR: (\"work_days\", None),\n MOES_FORCE_VALVE_ATTR: (\"valve_force_state\", None),\n }\n\n def _update_attribute(self, attrid, value):\n super()._update_attribute(attrid, value)\n if attrid in self.DIRECT_MAPPED_ATTRS:\n self.endpoint.device.thermostat_bus.listener_event(\n \"temperature_change\",\n self.DIRECT_MAPPED_ATTRS[attrid][0],\n value\n if self.DIRECT_MAPPED_ATTRS[attrid][1] is None\n else self.DIRECT_MAPPED_ATTRS[attrid][1](\n value\n ), # decidegree to centidegree\n )\n elif attrid in (MOES_SCHEDULE_WORKDAY_ATTR, MOES_SCHEDULE_WEEKEND_ATTR):\n self.endpoint.device.thermostat_bus.listener_event(\n \"schedule_change\", attrid, value\n )\n\n if attrid == MOES_WINDOW_DETECT_ATTR:\n self.endpoint.device.window_detection_bus.listener_event(\n \"window_detect_change\", value\n )\n elif attrid == MOES_MODE_ATTR:\n self.endpoint.device.thermostat_bus.listener_event(\"mode_change\", value)\n elif attrid == MOES_VALVE_STATE_ATTR:\n self.endpoint.device.thermostat_bus.listener_event(\"state_change\", value)\n elif attrid == MOES_CHILD_LOCK_ATTR:\n mode = 1 if value else 0\n self.endpoint.device.ui_bus.listener_event(\"child_lock_change\", mode)\n elif attrid == MOES_AUTO_LOCK_ATTR:\n mode = 1 if value else 0\n 
self.endpoint.device.ui_bus.listener_event(\"autolock_change\", mode)\n elif attrid == MOES_BATTERY_LOW_ATTR:\n self.endpoint.device.battery_bus.listener_event(\n \"battery_change\", 5 if value else 100\n )\n\n\nclass MoesThermostat(TuyaThermostatCluster):\n \"\"\"Thermostat cluster for some thermostatic valves.\"\"\"\n\n class Preset(t.enum8):\n \"\"\"Working modes of the thermostat.\"\"\"\n\n Away = 0x00\n Schedule = 0x01\n Manual = 0x02\n Comfort = 0x03\n Eco = 0x04\n Boost = 0x05\n Complex = 0x06\n\n class WorkDays(t.enum8):\n \"\"\"Workday configuration for scheduler operation mode.\"\"\"\n\n MonToFri = 0x00\n MonToSat = 0x01\n MonToSun = 0x02\n\n class ForceValveState(t.enum8):\n \"\"\"Force valve state option.\"\"\"\n\n Normal = 0x00\n Open = 0x01\n Close = 0x02\n\n _CONSTANT_ATTRIBUTES = {\n 0x001B: Thermostat.ControlSequenceOfOperation.Heating_Only,\n 0x001C: Thermostat.SystemMode.Heat,\n }\n\n manufacturer_attributes = {\n 0x4000: (\"comfort_heating_setpoint\", t.int16s),\n 0x4001: (\"eco_heating_setpoint\", t.int16s),\n 0x4002: (\"operation_preset\", Preset),\n 0x4003: (\"work_days\", WorkDays),\n 0x4004: (\"valve_open_percentage\", t.uint8_t),\n 0x4005: (\"boost_duration_seconds\", t.uint32_t),\n 0x4006: (\"valve_force_state\", ForceValveState),\n 0x4007: (\"unoccupied_duration_days\", t.uint32_t),\n 0x4110: (\"workday_schedule_1_hour\", t.uint8_t),\n 0x4111: (\"workday_schedule_1_minute\", t.uint8_t),\n 0x4112: (\"workday_schedule_1_temperature\", t.int16s),\n 0x4120: (\"workday_schedule_2_hour\", t.uint8_t),\n 0x4121: (\"workday_schedule_2_minute\", t.uint8_t),\n 0x4122: (\"workday_schedule_2_temperature\", t.int16s),\n 0x4130: (\"workday_schedule_3_hour\", t.uint8_t),\n 0x4131: (\"workday_schedule_3_minute\", t.uint8_t),\n 0x4132: (\"workday_schedule_3_temperature\", t.int16s),\n 0x4140: (\"workday_schedule_4_hour\", t.uint8_t),\n 0x4141: (\"workday_schedule_4_minute\", t.uint8_t),\n 0x4142: (\"workday_schedule_4_temperature\", t.int16s),\n 0x4150: (\"workday_schedule_5_hour\", t.uint8_t),\n 0x4151: (\"workday_schedule_5_minute\", t.uint8_t),\n 0x4152: (\"workday_schedule_5_temperature\", t.int16s),\n 0x4160: (\"workday_schedule_6_hour\", t.uint8_t),\n 0x4161: (\"workday_schedule_6_minute\", t.uint8_t),\n 0x4162: (\"workday_schedule_6_temperature\", t.int16s),\n 0x4210: (\"weekend_schedule_1_hour\", t.uint8_t),\n 0x4211: (\"weekend_schedule_1_minute\", t.uint8_t),\n 0x4212: (\"weekend_schedule_1_temperature\", t.int16s),\n 0x4220: (\"weekend_schedule_2_hour\", t.uint8_t),\n 0x4221: (\"weekend_schedule_2_minute\", t.uint8_t),\n 0x4222: (\"weekend_schedule_2_temperature\", t.int16s),\n 0x4230: (\"weekend_schedule_3_hour\", t.uint8_t),\n 0x4231: (\"weekend_schedule_3_minute\", t.uint8_t),\n 0x4232: (\"weekend_schedule_3_temperature\", t.int16s),\n 0x4240: (\"weekend_schedule_4_hour\", t.uint8_t),\n 0x4241: (\"weekend_schedule_4_minute\", t.uint8_t),\n 0x4242: (\"weekend_schedule_4_temperature\", t.int16s),\n 0x4250: (\"weekend_schedule_5_hour\", t.uint8_t),\n 0x4251: (\"weekend_schedule_5_minute\", t.uint8_t),\n 0x4252: (\"weekend_schedule_5_temperature\", t.int16s),\n 0x4260: (\"weekend_schedule_6_hour\", t.uint8_t),\n 0x4261: (\"weekend_schedule_6_minute\", t.uint8_t),\n 0x4262: (\"weekend_schedule_6_temperature\", t.int16s),\n }\n\n DIRECT_MAPPING_ATTRS = {\n \"occupied_heating_setpoint\": (\n MOES_TARGET_TEMP_ATTR,\n lambda value: round(value / 10),\n ),\n \"unoccupied_heating_setpoint\": (\n MOES_AWAY_TEMP_ATTR,\n lambda value: round(value / 100),\n ),\n 
\"comfort_heating_setpoint\": (\n MOES_COMFORT_TEMP_ATTR,\n lambda value: round(value / 100),\n ),\n \"eco_heating_setpoint\": (MOES_ECO_TEMP_ATTR, lambda value: round(value / 100)),\n \"min_heat_setpoint_limit\": (\n MOES_MIN_TEMPERATURE_ATTR,\n lambda value: round(value / 100),\n ),\n \"max_heat_setpoint_limit\": (\n MOES_MAX_TEMPERATURE_ATTR,\n lambda value: round(value / 100),\n ),\n \"local_temperature_calibration\": (\n MOES_TEMP_CALIBRATION_ATTR,\n lambda value: round(value / 10),\n ),\n \"work_days\": (MOES_WEEK_FORMAT_ATTR, None),\n \"operation_preset\": (MOES_MODE_ATTR, None),\n \"boost_duration_seconds\": (MOES_BOOST_TIME_ATTR, None),\n \"valve_force_state\": (MOES_FORCE_VALVE_ATTR, None),\n \"unoccupied_duration_days\": (MOES_AWAY_DAYS_ATTR, None),\n }\n\n WORKDAY_SCHEDULE_ATTRS = {\n \"workday_schedule_6_temperature\": 1500,\n \"workday_schedule_6_minute\": 0,\n \"workday_schedule_6_hour\": 22,\n \"workday_schedule_5_temperature\": 2000,\n \"workday_schedule_5_minute\": 30,\n \"workday_schedule_5_hour\": 17,\n \"workday_schedule_4_temperature\": 1500,\n \"workday_schedule_4_minute\": 30,\n \"workday_schedule_4_hour\": 12,\n \"workday_schedule_3_temperature\": 1500,\n \"workday_schedule_3_minute\": 30,\n \"workday_schedule_3_hour\": 11,\n \"workday_schedule_2_temperature\": 1500,\n \"workday_schedule_2_minute\": 0,\n \"workday_schedule_2_hour\": 8,\n \"workday_schedule_1_temperature\": 2000,\n \"workday_schedule_1_minute\": 0,\n \"workday_schedule_1_hour\": 6,\n }\n\n WEEKEND_SCHEDULE_ATTRS = {\n \"weekend_schedule_6_temperature\": 1500,\n \"weekend_schedule_6_minute\": 0,\n \"weekend_schedule_6_hour\": 22,\n \"weekend_schedule_5_temperature\": 2000,\n \"weekend_schedule_5_minute\": 30,\n \"weekend_schedule_5_hour\": 17,\n \"weekend_schedule_4_temperature\": 1500,\n \"weekend_schedule_4_minute\": 30,\n \"weekend_schedule_4_hour\": 12,\n \"weekend_schedule_3_temperature\": 1500,\n \"weekend_schedule_3_minute\": 30,\n \"weekend_schedule_3_hour\": 11,\n \"weekend_schedule_2_temperature\": 1500,\n \"weekend_schedule_2_minute\": 0,\n \"weekend_schedule_2_hour\": 8,\n \"weekend_schedule_1_temperature\": 2000,\n \"weekend_schedule_1_minute\": 0,\n \"weekend_schedule_1_hour\": 6,\n }\n\n def map_attribute(self, attribute, value):\n \"\"\"Map standardized attribute value to dict of manufacturer values.\"\"\"\n\n if attribute in self.DIRECT_MAPPING_ATTRS:\n return {\n self.DIRECT_MAPPING_ATTRS[attribute][0]: value\n if self.DIRECT_MAPPING_ATTRS[attribute][1] is None\n else self.DIRECT_MAPPING_ATTRS[attribute][1](value)\n }\n if attribute in (\"programing_oper_mode\", \"occupancy\"):\n if attribute == \"occupancy\":\n occupancy = value\n oper_mode = self._attr_cache.get(\n self.attridx[\"programing_oper_mode\"],\n self.ProgrammingOperationMode.Simple,\n )\n else:\n occupancy = self._attr_cache.get(\n self.attridx[\"occupancy\"], self.Occupancy.Occupied\n )\n oper_mode = value\n if occupancy == self.Occupancy.Unoccupied:\n return {MOES_MODE_ATTR: 0}\n if occupancy == self.Occupancy.Occupied:\n if oper_mode == self.ProgrammingOperationMode.Schedule_programming_mode:\n return {MOES_MODE_ATTR: 1}\n if oper_mode == self.ProgrammingOperationMode.Simple:\n return {MOES_MODE_ATTR: 2}\n if oper_mode == self.ProgrammingOperationMode.Economy_mode:\n return {MOES_MODE_ATTR: 4}\n self.error(\"Unsupported value for ProgrammingOperationMode\")\n else:\n self.error(\"Unsupported value for Occupancy\")\n if attribute == \"system_mode\":\n return {\n MOES_MODE_ATTR: self._attr_cache.get(\n 
self.attridx[\"operation_preset\"], 2\n )\n }\n if attribute in self.WORKDAY_SCHEDULE_ATTRS:\n data = data144()\n for num, (attr, default) in enumerate(self.WORKDAY_SCHEDULE_ATTRS.items()):\n\n if num % 3 == 0:\n if attr == attribute:\n val = round(value / 100)\n else:\n val = round(\n self._attr_cache.get(self.attridx[attr], default) / 100\n )\n else:\n if attr == attribute:\n val = value\n else:\n val = self._attr_cache.get(self.attridx[attr], default)\n\n data.append(val)\n return {MOES_SCHEDULE_WORKDAY_ATTR: data}\n if attribute in self.WEEKEND_SCHEDULE_ATTRS:\n data = data144()\n for num, (attr, default) in enumerate(self.WEEKEND_SCHEDULE_ATTRS.items()):\n\n if num % 3 == 0:\n if attr == attribute:\n val = round(value / 100)\n else:\n val = round(\n self._attr_cache.get(self.attridx[attr], default) / 100\n )\n else:\n if attr == attribute:\n val = value\n else:\n val = self._attr_cache.get(self.attridx[attr], default)\n\n data.append(val)\n return {MOES_SCHEDULE_WEEKEND_ATTR: data}\n\n def mode_change(self, value):\n \"\"\"System Mode change.\"\"\"\n if value == 0:\n prog_mode = self.ProgrammingOperationMode.Simple\n occupancy = self.Occupancy.Unoccupied\n elif value == 1:\n prog_mode = self.ProgrammingOperationMode.Schedule_programming_mode\n occupancy = self.Occupancy.Occupied\n elif value == 2:\n prog_mode = self.ProgrammingOperationMode.Simple\n occupancy = self.Occupancy.Occupied\n elif value == 3:\n prog_mode = self.ProgrammingOperationMode.Simple\n occupancy = self.Occupancy.Occupied\n elif value == 4:\n prog_mode = self.ProgrammingOperationMode.Economy_mode\n occupancy = self.Occupancy.Occupied\n elif value == 5:\n prog_mode = self.ProgrammingOperationMode.Simple\n occupancy = self.Occupancy.Occupied\n else:\n prog_mode = self.ProgrammingOperationMode.Simple\n occupancy = self.Occupancy.Occupied\n\n self._update_attribute(self.attridx[\"programing_oper_mode\"], prog_mode)\n self._update_attribute(self.attridx[\"occupancy\"], occupancy)\n\n def schedule_change(self, attr, value):\n \"\"\"Scheduler attribute change.\"\"\"\n\n if attr == MOES_SCHEDULE_WORKDAY_ATTR:\n self._update_attribute(\n self.attridx[\"workday_schedule_1_hour\"], value[17] & 0x3F\n )\n self._update_attribute(self.attridx[\"workday_schedule_1_minute\"], value[16])\n self._update_attribute(\n self.attridx[\"workday_schedule_1_temperature\"], value[15] * 100\n )\n self._update_attribute(\n self.attridx[\"workday_schedule_2_hour\"], value[14] & 0x3F\n )\n self._update_attribute(self.attridx[\"workday_schedule_2_minute\"], value[13])\n self._update_attribute(\n self.attridx[\"workday_schedule_2_temperature\"], value[12] * 100\n )\n self._update_attribute(\n self.attridx[\"workday_schedule_3_hour\"], value[11] & 0x3F\n )\n self._update_attribute(self.attridx[\"workday_schedule_3_minute\"], value[10])\n self._update_attribute(\n self.attridx[\"workday_schedule_3_temperature\"], value[9] * 100\n )\n self._update_attribute(\n self.attridx[\"workday_schedule_4_hour\"], value[8] & 0x3F\n )\n self._update_attribute(self.attridx[\"workday_schedule_4_minute\"], value[7])\n self._update_attribute(\n self.attridx[\"workday_schedule_4_temperature\"], value[6] * 100\n )\n self._update_attribute(\n self.attridx[\"workday_schedule_5_hour\"], value[5] & 0x3F\n )\n self._update_attribute(self.attridx[\"workday_schedule_5_minute\"], value[4])\n self._update_attribute(\n self.attridx[\"workday_schedule_5_temperature\"], value[3] * 100\n )\n self._update_attribute(\n self.attridx[\"workday_schedule_6_hour\"], value[2] & 0x3F\n )\n 
self._update_attribute(self.attridx[\"workday_schedule_6_minute\"], value[1])\n self._update_attribute(\n self.attridx[\"workday_schedule_6_temperature\"], value[0] * 100\n )\n elif attr == MOES_SCHEDULE_WEEKEND_ATTR:\n self._update_attribute(\n self.attridx[\"weekend_schedule_1_hour\"], value[17] & 0x3F\n )\n self._update_attribute(self.attridx[\"weekend_schedule_1_minute\"], value[16])\n self._update_attribute(\n self.attridx[\"weekend_schedule_1_temperature\"], value[15] * 100\n )\n self._update_attribute(\n self.attridx[\"weekend_schedule_2_hour\"], value[14] & 0x3F\n )\n self._update_attribute(self.attridx[\"weekend_schedule_2_minute\"], value[13])\n self._update_attribute(\n self.attridx[\"weekend_schedule_2_temperature\"], value[12] * 100\n )\n self._update_attribute(\n self.attridx[\"weekend_schedule_3_hour\"], value[11] & 0x3F\n )\n self._update_attribute(self.attridx[\"weekend_schedule_3_minute\"], value[10])\n self._update_attribute(\n self.attridx[\"weekend_schedule_3_temperature\"], value[9] * 100\n )\n self._update_attribute(\n self.attridx[\"weekend_schedule_4_hour\"], value[8] & 0x3F\n )\n self._update_attribute(self.attridx[\"weekend_schedule_4_minute\"], value[7])\n self._update_attribute(\n self.attridx[\"weekend_schedule_4_temperature\"], value[6] * 100\n )\n self._update_attribute(\n self.attridx[\"weekend_schedule_5_hour\"], value[5] & 0x3F\n )\n self._update_attribute(self.attridx[\"weekend_schedule_5_minute\"], value[4])\n self._update_attribute(\n self.attridx[\"weekend_schedule_5_temperature\"], value[3] * 100\n )\n self._update_attribute(\n self.attridx[\"weekend_schedule_6_hour\"], value[2] & 0x3F\n )\n self._update_attribute(self.attridx[\"weekend_schedule_6_minute\"], value[1])\n self._update_attribute(\n self.attridx[\"weekend_schedule_6_temperature\"], value[0] * 100\n )\n\n\nclass MoesUserInterface(TuyaUserInterfaceCluster):\n \"\"\"HVAC User interface cluster for tuya electric heating thermostats.\"\"\"\n\n _CHILD_LOCK_ATTR = MOES_CHILD_LOCK_ATTR\n\n manufacturer_attributes = {\n 0x5000: (\"auto_lock\", t.Bool),\n }\n\n def autolock_change(self, value):\n \"\"\"Automatic lock change.\"\"\"\n\n self._update_attribute(self.attridx[\"auto_lock\"], value)\n\n def map_attribute(self, attribute, value):\n \"\"\"Map standardized attribute value to dict of manufacturer values.\"\"\"\n\n if attribute == \"auto_lock\":\n return {MOES_AUTO_LOCK_ATTR: value}\n\n\nclass MoesWindowDetection(LocalDataCluster, OnOff):\n \"\"\"On/Off cluster for the window detection function of the electric heating thermostats.\"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"Init.\"\"\"\n super().__init__(*args, **kwargs)\n self.endpoint.device.window_detection_bus.add_listener(self)\n\n manufacturer_attributes = {\n 0x6000: (\"window_detection_temperature\", t.int16s),\n 0x6001: (\"window_detection_timeout_minutes\", t.uint8_t),\n }\n\n def window_detect_change(self, value):\n \"\"\"Window detection change.\"\"\"\n\n self._update_attribute(\n self.attridx[\"window_detection_timeout_minutes\"], value[0]\n )\n self._update_attribute(\n self.attridx[\"window_detection_temperature\"], value[1] * 100\n )\n self._update_attribute(self.attridx[\"on_off\"], value[2])\n\n async def write_attributes(self, attributes, manufacturer=None):\n \"\"\"Defer attributes writing to the set_data tuya command.\"\"\"\n\n records = self._write_attr_records(attributes)\n\n if not records:\n return [[foundation.WriteAttributesStatusRecord(foundation.Status.SUCCESS)]]\n\n has_change = False\n data = 
t.data24()\n data.append(\n self._attr_cache.get(\n self.attridx[\"window_detection_timeout_minutes\"],\n 5,\n )\n )\n data.append(\n round(\n self._attr_cache.get(\n self.attridx[\"window_detection_temperature\"],\n 50,\n )\n / 100\n )\n )\n data.append(\n self._attr_cache.get(\n self.attridx[\"on_off\"],\n False,\n )\n )\n\n for record in records:\n attr_name = self.attributes[record.attrid][0]\n if attr_name == \"on_off\":\n data[2] = record.value.value\n has_change = True\n elif attr_name == \"window_detection_temperature\":\n data[1] = record.value.value / 100\n has_change = True\n elif attr_name == \"window_detection_timeout_minutes\":\n data[0] = record.value.value\n has_change = True\n\n if has_change:\n return await self.endpoint.tuya_manufacturer.write_attributes(\n {MOES_WINDOW_DETECT_ATTR: data}, manufacturer=manufacturer\n )\n\n return [\n [\n foundation.WriteAttributesStatusRecord(\n foundation.Status.FAILURE, r.attrid\n )\n for r in records\n ]\n ]\n\n async def command(\n self,\n command_id: Union[foundation.Command, int, t.uint8_t],\n *args,\n manufacturer: Optional[Union[int, t.uint16_t]] = None,\n expect_reply: bool = True,\n tsn: Optional[Union[int, t.uint8_t]] = None,\n ):\n \"\"\"Override the default Cluster command.\"\"\"\n\n if command_id in (0x0000, 0x0001, 0x0002):\n\n if command_id == 0x0000:\n value = False\n elif command_id == 0x0001:\n value = True\n else:\n attrid = self.attridx[\"on_off\"]\n success, _ = await self.read_attributes(\n (attrid,), manufacturer=manufacturer\n )\n try:\n value = success[attrid]\n except KeyError:\n return foundation.Status.FAILURE\n value = not value\n\n (res,) = await self.write_attributes(\n {\"on_off\": value},\n manufacturer=manufacturer,\n )\n return [command_id, res[0].status]\n\n return [command_id, foundation.Status.UNSUP_CLUSTER_COMMAND]\n\n\nZONNSMART_CHILD_LOCK_ATTR = 0x0128 # [0] unlocked [1] child-locked\nZONNSMART_WINDOW_DETECT_ATTR = 0x0108 # [0] inactive [1] active\nZONNSMART_TARGET_TEMP_ATTR = 0x0210 # [0,0,0,210] target room temp (decidegree)\nZONNSMART_TEMPERATURE_ATTR = 0x0218 # [0,0,0,200] current room temp (decidegree)\nZONNSMART_BATTERY_ATTR = 0x0223 # [0,0,0,98] battery charge\nZONNSMART_MODE_ATTR = (\n 0x0402 # [0] Scheduled/auto [1] manual [2] Holiday [3] HolidayReady\n)\nZONNSMART_HEATING_STOPPING = 0x016B # [0] inactive [1] active\nZONNSMART_BOOST_TIME_ATTR = 0x0265 # BOOST mode operating time in (sec)\nZONNSMART_UPTIME_TIME_ATTR = (\n 0x0024 # Seems to be the uptime attribute (sent hourly, increases) [0,200]\n)\n\n\nclass ZONNSMARTManufCluster(TuyaManufClusterAttributes):\n \"\"\"Manufacturer Specific Cluster of some thermostatic valves.\"\"\"\n\n manufacturer_attributes = {\n ZONNSMART_CHILD_LOCK_ATTR: (\"child_lock\", t.uint8_t),\n ZONNSMART_WINDOW_DETECT_ATTR: (\"window_detection\", t.uint8_t),\n ZONNSMART_TARGET_TEMP_ATTR: (\"target_temperature\", t.uint32_t),\n ZONNSMART_TEMPERATURE_ATTR: (\"temperature\", t.uint32_t),\n ZONNSMART_BATTERY_ATTR: (\"battery\", t.uint32_t),\n ZONNSMART_MODE_ATTR: (\"mode\", t.uint8_t),\n ZONNSMART_BOOST_TIME_ATTR: (\"boost_duration_seconds\", t.uint32_t),\n ZONNSMART_UPTIME_TIME_ATTR: (\"uptime\", t.uint32_t),\n ZONNSMART_HEATING_STOPPING: (\"heating_stop\", t.uint8_t),\n }\n\n DIRECT_MAPPED_ATTRS = {\n ZONNSMART_TEMPERATURE_ATTR: (\"local_temp\", lambda value: value * 10),\n ZONNSMART_TARGET_TEMP_ATTR: (\n \"occupied_heating_setpoint\",\n lambda value: value * 10,\n ),\n ZONNSMART_BOOST_TIME_ATTR: (\"boost_duration_seconds\", None),\n ZONNSMART_UPTIME_TIME_ATTR: 
(\"uptime_duration_hours\", None),\n }\n\n def _update_attribute(self, attrid, value):\n super()._update_attribute(attrid, value)\n if attrid in self.DIRECT_MAPPED_ATTRS:\n self.endpoint.device.thermostat_bus.listener_event(\n \"temperature_change\",\n self.DIRECT_MAPPED_ATTRS[attrid][0],\n value\n if self.DIRECT_MAPPED_ATTRS[attrid][1] is None\n else self.DIRECT_MAPPED_ATTRS[attrid][1](\n value\n ), # decidegree to centidegree\n )\n elif attrid == ZONNSMART_MODE_ATTR:\n self.endpoint.device.thermostat_bus.listener_event(\"mode_change\", value)\n elif attrid == ZONNSMART_HEATING_STOPPING:\n self.endpoint.device.thermostat_bus.listener_event(\n \"state_change\", value == 0\n )\n elif attrid == ZONNSMART_CHILD_LOCK_ATTR:\n mode = 1 if value else 0\n self.endpoint.device.ui_bus.listener_event(\"child_lock_change\", mode)\n elif attrid == ZONNSMART_BATTERY_ATTR:\n self.endpoint.device.battery_bus.listener_event(\"battery_change\", value)\n\n\nclass ZONNSMARTThermostat(TuyaThermostatCluster):\n \"\"\"Thermostat cluster for some thermostatic valves.\"\"\"\n\n DIRECT_MAPPING_ATTRS = {\n \"occupied_heating_setpoint\": (\n ZONNSMART_TARGET_TEMP_ATTR,\n lambda value: round(value / 10),\n ),\n \"operation_preset\": (ZONNSMART_MODE_ATTR, None),\n \"boost_duration_seconds\": (ZONNSMART_BOOST_TIME_ATTR, None),\n }\n\n def map_attribute(self, attribute, value):\n \"\"\"Map standardized attribute value to dict of manufacturer values.\"\"\"\n\n if attribute in self.DIRECT_MAPPING_ATTRS:\n return {\n self.DIRECT_MAPPING_ATTRS[attribute][0]: value\n if self.DIRECT_MAPPING_ATTRS[attribute][1] is None\n else self.DIRECT_MAPPING_ATTRS[attribute][1](value)\n }\n if attribute in (\"system_mode\", \"programing_oper_mode\"):\n if attribute == \"system_mode\":\n system_mode = value\n oper_mode = self._attr_cache.get(\n self.attridx[\"programing_oper_mode\"],\n self.ProgrammingOperationMode.Simple,\n )\n else:\n system_mode = self._attr_cache.get(\n self.attridx[\"system_mode\"], self.SystemMode.Heat\n )\n oper_mode = value\n if system_mode == self.SystemMode.Off:\n return {ZONNSMART_HEATING_STOPPING: 1}\n if system_mode == self.SystemMode.Heat:\n if oper_mode == self.ProgrammingOperationMode.Schedule_programming_mode:\n return {ZONNSMART_MODE_ATTR: 0}\n if oper_mode == self.ProgrammingOperationMode.Simple:\n return {ZONNSMART_MODE_ATTR: 1}\n self.error(\"Unsupported value for ProgrammingOperationMode\")\n else:\n self.error(\"Unsupported value for SystemMode\")\n\n def mode_change(self, value):\n \"\"\"System Mode change.\"\"\"\n if value == 0:\n prog_mode = self.ProgrammingOperationMode.Schedule_programming_mode\n elif value == 1:\n prog_mode = self.ProgrammingOperationMode.Simple\n else:\n prog_mode = self.ProgrammingOperationMode.Simple\n\n self._update_attribute(self.attridx[\"system_mode\"], self.SystemMode.Heat)\n self._update_attribute(self.attridx[\"programing_oper_mode\"], prog_mode)\n\n\nclass ZONNSMARTUserInterface(TuyaUserInterfaceCluster):\n \"\"\"HVAC User interface cluster for tuya electric heating thermostats.\"\"\"\n\n _CHILD_LOCK_ATTR = ZONNSMART_CHILD_LOCK_ATTR\n\n\nclass SiterwellGS361_Type1(TuyaThermostat):\n \"\"\"SiterwellGS361 Thermostatic radiator valve and clones.\"\"\"\n\n signature = {\n # endpoint=1 profile=260 device_type=0 device_version=0 input_clusters=[0, 3]\n # output_clusters=[3, 25]>\n MODELS_INFO: [\n (\"_TYST11_jeaxp72v\", \"eaxp72v\"),\n (\"_TYST11_kfvq6avy\", \"fvq6avy\"),\n (\"_TYST11_zivfvd7h\", \"ivfvd7h\"),\n (\"_TYST11_hhrtiq0x\", \"hrtiq0x\"),\n (\"_TYST11_ps5v5jor\", 
\"s5v5jor\"),\n (\"_TYST11_owwdxjbx\", \"wwdxjbx\"),\n (\"_TYST11_8daqwrsj\", \"daqwrsj\"),\n ],\n ENDPOINTS: {\n 1: {\n PROFILE_ID: zha.PROFILE_ID,\n DEVICE_TYPE: zha.DeviceType.ON_OFF_SWITCH,\n INPUT_CLUSTERS: [Basic.cluster_id, Identify.cluster_id],\n OUTPUT_CLUSTERS: [Identify.cluster_id, Ota.cluster_id],\n }\n },\n }\n\n replacement = {\n ENDPOINTS: {\n 1: {\n PROFILE_ID: zha.PROFILE_ID,\n DEVICE_TYPE: zha.DeviceType.THERMOSTAT,\n INPUT_CLUSTERS: [\n Basic.cluster_id,\n Identify.cluster_id,\n SiterwellManufCluster,\n SiterwellThermostat,\n SiterwellUserInterface,\n TuyaPowerConfigurationCluster,\n ],\n OUTPUT_CLUSTERS: [Identify.cluster_id, Ota.cluster_id],\n }\n }\n }\n\n\nclass SiterwellGS361_Type2(TuyaThermostat):\n \"\"\"SiterwellGS361 Thermostatic radiator valve and clones (2nd cluster signature).\"\"\"\n\n signature = {\n # endpoint=1 profile=260 device_type=81 device_version=0 input_clusters=[0, 4, 5, 61184]\n # output_clusters=[10, 25]>\n MODELS_INFO: [\n (\"_TZE200_jeaxp72v\", \"TS0601\"),\n (\"_TZE200_kfvq6avy\", \"TS0601\"),\n (\"_TZE200_zivfvd7h\", \"TS0601\"),\n (\"_TZE200_hhrtiq0x\", \"TS0601\"),\n (\"_TZE200_ps5v5jor\", \"TS0601\"),\n (\"_TZE200_owwdxjbx\", \"TS0601\"),\n (\"_TZE200_8daqwrsj\", \"TS0601\"),\n ],\n ENDPOINTS: {\n 1: {\n PROFILE_ID: zha.PROFILE_ID,\n DEVICE_TYPE: zha.DeviceType.SMART_PLUG,\n INPUT_CLUSTERS: [\n Basic.cluster_id,\n Groups.cluster_id,\n Scenes.cluster_id,\n TuyaManufClusterAttributes.cluster_id,\n ],\n OUTPUT_CLUSTERS: [Time.cluster_id, Ota.cluster_id],\n }\n },\n }\n\n replacement = {\n ENDPOINTS: {\n 1: {\n PROFILE_ID: zha.PROFILE_ID,\n DEVICE_TYPE: zha.DeviceType.THERMOSTAT,\n INPUT_CLUSTERS: [\n Basic.cluster_id,\n Groups.cluster_id,\n Scenes.cluster_id,\n SiterwellManufCluster,\n SiterwellThermostat,\n SiterwellUserInterface,\n TuyaPowerConfigurationCluster,\n ],\n OUTPUT_CLUSTERS: [Time.cluster_id, Ota.cluster_id],\n }\n }\n }\n\n\nclass MoesHY368_Type1(TuyaThermostat):\n \"\"\"MoesHY368 Thermostatic radiator valve.\"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"Init device.\"\"\"\n self.window_detection_bus = Bus()\n super().__init__(*args, **kwargs)\n\n signature = {\n # endpoint=1 profile=260 device_type=81 device_version=0 input_clusters=[0, 4, 5, 61184]\n # output_clusters=[10, 25]>\n MODELS_INFO: [\n (\"_TZE200_ckud7u2l\", \"TS0601\"),\n (\"_TZE200_ywdxldoj\", \"TS0601\"),\n (\"_TZE200_cwnjrr72\", \"TS0601\"),\n (\"_TZE200_b6wax7g0\", \"TS0601\"),\n ],\n ENDPOINTS: {\n 1: {\n PROFILE_ID: zha.PROFILE_ID,\n DEVICE_TYPE: zha.DeviceType.SMART_PLUG,\n INPUT_CLUSTERS: [\n Basic.cluster_id,\n Groups.cluster_id,\n Scenes.cluster_id,\n TuyaManufClusterAttributes.cluster_id,\n ],\n OUTPUT_CLUSTERS: [Time.cluster_id, Ota.cluster_id],\n }\n },\n }\n\n replacement = {\n ENDPOINTS: {\n 1: {\n PROFILE_ID: zha.PROFILE_ID,\n DEVICE_TYPE: zha.DeviceType.THERMOSTAT,\n INPUT_CLUSTERS: [\n Basic.cluster_id,\n Groups.cluster_id,\n Scenes.cluster_id,\n MoesManufCluster,\n MoesThermostat,\n MoesUserInterface,\n MoesWindowDetection,\n TuyaPowerConfigurationCluster,\n ],\n OUTPUT_CLUSTERS: [Time.cluster_id, Ota.cluster_id],\n }\n }\n }\n\n\nclass MoesHY368_Type2(TuyaThermostat):\n \"\"\"MoesHY368 Thermostatic radiator valve (2nd cluster signature).\"\"\"\n\n signature = {\n # endpoint=1 profile=260 device_type=0 device_version=0 input_clusters=[0, 3]\n # output_clusters=[3, 25]>\n MODELS_INFO: [\n (\"_TYST11_ckud7u2l\", \"kud7u2l\"),\n (\"_TYST11_ywdxldoj\", \"wdxldoj\"),\n (\"_TYST11_cwnjrr72\", \"wnjrr72\"),\n (\"_TYST11_b6wax7g0\", 
\"6wax7g0\"),\n ],\n ENDPOINTS: {\n 1: {\n PROFILE_ID: zha.PROFILE_ID,\n DEVICE_TYPE: zha.DeviceType.ON_OFF_SWITCH,\n INPUT_CLUSTERS: [Basic.cluster_id, Identify.cluster_id],\n OUTPUT_CLUSTERS: [Identify.cluster_id, Ota.cluster_id],\n }\n },\n }\n\n replacement = {\n ENDPOINTS: {\n 1: {\n PROFILE_ID: zha.PROFILE_ID,\n DEVICE_TYPE: zha.DeviceType.THERMOSTAT,\n INPUT_CLUSTERS: [\n Basic.cluster_id,\n Identify.cluster_id,\n MoesManufCluster,\n MoesThermostat,\n MoesUserInterface,\n MoesWindowDetection,\n TuyaPowerConfigurationCluster,\n ],\n OUTPUT_CLUSTERS: [Identify.cluster_id, Ota.cluster_id],\n }\n }\n }\n\n\nclass ZonnsmartTV01_ZG(TuyaThermostat):\n \"\"\"ZONNSMART TV01-ZG Thermostatic radiator valve.\"\"\"\n\n signature = {\n # endpoint=1 profile=260 device_type=81 device_version=0 input_clusters=[0, 4, 5, 61184]\n # output_clusters=[10, 25]>\n MODELS_INFO: [\n (\"_TZE200_e9ba97vf\", \"TS0601\"),\n (\"_TZE200_husqqvux\", \"TS0601\"),\n ],\n ENDPOINTS: {\n 1: {\n PROFILE_ID: zha.PROFILE_ID,\n DEVICE_TYPE: zha.DeviceType.SMART_PLUG,\n INPUT_CLUSTERS: [\n Basic.cluster_id,\n Groups.cluster_id,\n Scenes.cluster_id,\n TuyaManufClusterAttributes.cluster_id,\n ],\n OUTPUT_CLUSTERS: [Time.cluster_id, Ota.cluster_id],\n }\n },\n }\n\n replacement = {\n ENDPOINTS: {\n 1: {\n PROFILE_ID: zha.PROFILE_ID,\n DEVICE_TYPE: zha.DeviceType.THERMOSTAT,\n INPUT_CLUSTERS: [\n Basic.cluster_id,\n Groups.cluster_id,\n Scenes.cluster_id,\n ZONNSMARTManufCluster,\n ZONNSMARTThermostat,\n ZONNSMARTUserInterface,\n TuyaPowerConfigurationCluster,\n ],\n OUTPUT_CLUSTERS: [Time.cluster_id, Ota.cluster_id],\n }\n }\n }\n",
"path": "zhaquirks/tuya/valve.py"
}
] | diff --git a/zhaquirks/tuya/valve.py b/zhaquirks/tuya/valve.py
index 6da0470180..df9627b8f1 100644
--- a/zhaquirks/tuya/valve.py
+++ b/zhaquirks/tuya/valve.py
@@ -1077,6 +1077,7 @@ class ZonnsmartTV01_ZG(TuyaThermostat):
# output_clusters=[10, 25]>
MODELS_INFO: [
("_TZE200_e9ba97vf", "TS0601"),
+ ("_TZE200_husqqvux", "TS0601"),
],
ENDPOINTS: {
1: {
|
liqd__a4-meinberlin-1813 | Red input fields on first page load
When loading some pages, input fields are reddish, suggesting there were validation errors, even though no input has been submitted yet.

| [
{
"content": "import re\n\nfrom django import forms\nfrom django.core.exceptions import ValidationError\nfrom django.core.validators import EmailValidator\nfrom django.forms import widgets\nfrom django.utils.translation import ugettext_lazy as _\n\n\nclass CommaSeparatedEmailField(forms.Field):\n email_validator = EmailValidator(\n message=_('Please enter correct email addresses, separated by '\n 'commas.')\n )\n\n widget = widgets.TextInput(attrs={\n 'placeholder': '[email protected], [email protected], '\n '[email protected],…'\n })\n\n def to_python(self, value):\n if not value:\n return []\n\n emails = []\n for email in value.split(','):\n email = email.strip()\n self.email_validator(email)\n emails.append(email)\n\n return emails\n\n\nclass EmailFileField(forms.FileField):\n \"\"\"Extract emails from uploaded text files.\"\"\"\n\n widget = widgets.FileInput\n # Find possible email strings. Emails may be quoted and separated by\n # whitespaces, commas, semicolons or < and >.\n email_regex = re.compile(r'[^\\s;,\"\\'<]+@[^\\s;,\"\\'>]+\\.[a-z]{2,}')\n email_validator = EmailValidator()\n\n def clean(self, data, initial=None):\n file = super().clean(data, initial)\n return self._extract_emails(file)\n\n def _extract_emails(self, file):\n if not file:\n return []\n\n emails = []\n for byteline in file:\n # As it is difficult to guess the correct encoding of a file,\n # email addresses are restricted to contain only ascii letters.\n # This works for every encoding which is a superset of ascii like\n # utf-8 and latin-1. Non ascii chars are simply ignored.\n line = byteline.decode('ascii', 'ignore')\n for match in self.email_regex.finditer(line):\n email = match.group(0)\n if self.is_valid_email(email):\n emails.append(email)\n return emails\n\n def is_valid_email(self, email):\n try:\n self.email_validator(email)\n return True\n except ValidationError:\n return False\n",
"path": "meinberlin/apps/users/fields.py"
}
] | [
{
"content": "import re\n\nfrom django import forms\nfrom django.core.exceptions import ValidationError\nfrom django.core.validators import EmailValidator\nfrom django.forms import widgets\nfrom django.utils.translation import ugettext_lazy as _\n\n\nclass CommaSeparatedEmailField(forms.Field):\n email_validator = EmailValidator(\n message=_('Please enter correct email addresses, separated by '\n 'commas.')\n )\n\n widget = widgets.TextInput(attrs={\n 'placeholder': '[email protected], [email protected],…'\n })\n\n def to_python(self, value):\n if not value:\n return []\n\n emails = []\n for email in value.split(','):\n email = email.strip()\n self.email_validator(email)\n emails.append(email)\n\n return emails\n\n\nclass EmailFileField(forms.FileField):\n \"\"\"Extract emails from uploaded text files.\"\"\"\n\n widget = widgets.FileInput\n # Find possible email strings. Emails may be quoted and separated by\n # whitespaces, commas, semicolons or < and >.\n email_regex = re.compile(r'[^\\s;,\"\\'<]+@[^\\s;,\"\\'>]+\\.[a-z]{2,}')\n email_validator = EmailValidator()\n\n def clean(self, data, initial=None):\n file = super().clean(data, initial)\n return self._extract_emails(file)\n\n def _extract_emails(self, file):\n if not file:\n return []\n\n emails = []\n for byteline in file:\n # As it is difficult to guess the correct encoding of a file,\n # email addresses are restricted to contain only ascii letters.\n # This works for every encoding which is a superset of ascii like\n # utf-8 and latin-1. Non ascii chars are simply ignored.\n line = byteline.decode('ascii', 'ignore')\n for match in self.email_regex.finditer(line):\n email = match.group(0)\n if self.is_valid_email(email):\n emails.append(email)\n return emails\n\n def is_valid_email(self, email):\n try:\n self.email_validator(email)\n return True\n except ValidationError:\n return False\n",
"path": "meinberlin/apps/users/fields.py"
}
] | diff --git a/meinberlin/apps/projects/templates/meinberlin_projects/includes/users_from_email_form.html b/meinberlin/apps/projects/templates/meinberlin_projects/includes/users_from_email_form.html
index 62912b13aa..344d29b297 100644
--- a/meinberlin/apps/projects/templates/meinberlin_projects/includes/users_from_email_form.html
+++ b/meinberlin/apps/projects/templates/meinberlin_projects/includes/users_from_email_form.html
@@ -21,7 +21,7 @@
{% endif %}
<div class="input-group">
<div class="input-group__input widget widget--{{ form.add_users|widget_type }}">
- {{ form.add_users|attr:'required' }}
+ {{ form.add_users }}
</div>
<span>
<input type="submit" class="btn btn--primary input-group__after" value="{% trans 'Add' %}"/>
@@ -42,7 +42,7 @@
</div>
<div class="widget widget--{{ form.add_users_upload|widget_type }}">
<div class="input-group">
- {{ form.add_users_upload|attr:'required'|add_class:'input-group__input' }}
+ {{ form.add_users_upload|add_class:'input-group__input' }}
<span>
<input type="submit" class="btn btn--primary input-group__after" value="{% trans 'Upload' %}"/>
</span>
diff --git a/meinberlin/apps/users/fields.py b/meinberlin/apps/users/fields.py
index 37b0152e1a..671f1d716c 100644
--- a/meinberlin/apps/users/fields.py
+++ b/meinberlin/apps/users/fields.py
@@ -14,8 +14,7 @@ class CommaSeparatedEmailField(forms.Field):
)
widget = widgets.TextInput(attrs={
- 'placeholder': '[email protected], [email protected], '
- '[email protected],…'
+ 'placeholder': '[email protected], [email protected],…'
})
def to_python(self, value):
|
ckan__ckan-7881 | Invalid session timeout value on CKAN 2.10 (logged out users unexpectedly)
## CKAN version
2.10
## Describe the bug
According to our config declaration for [`beaker.session.timeout`](https://github.com/ckan/ckan/blob/656a39de2e7ed0ce47e15080f0f5d42b66b4929b/ckan/config/config_declaration.yaml#L306):
> Defaults to never expiring.
But the defined default value is 600 :upside_down_face:
Apart from the inconsistency, this is problematic because now that the logged-in user id is stored in the session by Flask-login, this means that users are logged out every 10 minutes.
The fix is to default it to never expire, as described in the docs (which is also the [Beaker default](https://beaker.readthedocs.io/en/latest/configuration.html#session-options)). The problem is that I can't simply set it to `None`, because then Beaker complains that the value is not an int:
```
File "/home/adria/dev/pyenvs/gates/lib/python3.8/site-packages/beaker/util.py", line 290, in verify_rules
params[key] = verify_options(params[key], types, message)
File "/home/adria/dev/pyenvs/gates/lib/python3.8/site-packages/beaker/util.py", line 281, in verify_options
raise Exception(error)
Exception: Session timeout must be an integer.
```
This is because our config parsing does not support "int or None" and leaves the literal string "None" as the value. I guess the alternative is to put in a really big number, but it would be good to handle this properly.
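A minimal sketch of what "int or None" handling could look like, assuming config values arrive as strings (the helper name here is made up; the actual fix switches the option to CKAN's `int_validator`, as the diff below shows):

```python
# Sketch: treat an empty/missing value as "never expire" (the Beaker
# default) and coerce everything else to int, instead of letting the
# literal string "None" reach Beaker.
def int_or_none(value):
    if value is None or str(value).strip() == "":
        return None  # never-expiring session
    return int(value)  # raises ValueError for junk like "None"

assert int_or_none(None) is None
assert int_or_none("") is None
assert int_or_none("600") == 600
```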
| [
{
"content": "# encoding: utf-8\nimport click\nimport logging\n\nimport ckan.model as model\n\nfrom typing import Any, Mapping\n\nfrom ckan.plugins import toolkit\n\n\nlog = logging.getLogger(__name__)\n\n\n_banner = \"\"\"\n****** Welcome to the CKAN shell ******\n\nThis session has some variables pre-populated:\n - app (CKAN Application object)\n - config (CKAN config dictionary)\n - model (CKAN model module to access the Database)\n - toolkit (CKAN toolkit module)\n \"\"\"\n\n\ndef ipython(namespace: Mapping[str, Any], banner: str) -> None:\n import IPython\n from traitlets.config.loader import Config\n\n c = Config()\n c.TerminalInteractiveShell.banner2 = banner # type: ignore\n\n IPython.start_ipython([], user_ns=namespace, config=c)\n\n\ndef python(namespace: Mapping[str, Any], banner: str) -> None:\n import code\n code.interact(banner=banner, local=namespace)\n\n\[email protected]()\[email protected]_option(\"-h\", \"--help\")\[email protected]_context\ndef shell(ctx: click.Context):\n \"\"\"Run an interactive IPython shell with the context of the\n CKAN instance.\n\n It will try to use IPython, if not installed it will callback\n to the default Python's shell.\n \"\"\"\n\n namespace = {\n \"app\": ctx.obj.app._wsgi_app,\n \"model\": model,\n \"config\": ctx.obj.config,\n \"toolkit\": toolkit,\n }\n\n try:\n ipython(namespace, _banner)\n except ImportError:\n log.debug(\"`ipython` library is missing. Using default python shell.\")\n python(namespace, _banner)\n",
"path": "ckan/cli/shell.py"
}
] | [
{
"content": "# encoding: utf-8\nimport click\nimport logging\n\nimport ckan.model as model\n\nfrom typing import Any, Mapping\n\nfrom ckan.plugins import toolkit\n\n\nlog = logging.getLogger(__name__)\n\n\n_banner = \"\"\"\n****** Welcome to the CKAN shell ******\n\nThis session has some variables pre-populated:\n - app (CKAN Application object)\n - config (CKAN config dictionary)\n - model (CKAN model module to access the Database)\n - toolkit (CKAN toolkit module)\n \"\"\"\n\n\ndef ipython(namespace: Mapping[str, Any], banner: str) -> None:\n import IPython\n from traitlets.config.loader import Config\n\n c = Config()\n c.TerminalInteractiveShell.banner2 = banner\n\n IPython.start_ipython([], user_ns=namespace, config=c)\n\n\ndef python(namespace: Mapping[str, Any], banner: str) -> None:\n import code\n code.interact(banner=banner, local=namespace)\n\n\[email protected]()\[email protected]_option(\"-h\", \"--help\")\[email protected]_context\ndef shell(ctx: click.Context):\n \"\"\"Run an interactive IPython shell with the context of the\n CKAN instance.\n\n It will try to use IPython, if not installed it will callback\n to the default Python's shell.\n \"\"\"\n\n namespace = {\n \"app\": ctx.obj.app._wsgi_app,\n \"model\": model,\n \"config\": ctx.obj.config,\n \"toolkit\": toolkit,\n }\n\n try:\n ipython(namespace, _banner)\n except ImportError:\n log.debug(\"`ipython` library is missing. Using default python shell.\")\n python(namespace, _banner)\n",
"path": "ckan/cli/shell.py"
}
] | diff --git a/changes/7881.bugfix b/changes/7881.bugfix
new file mode 100644
index 00000000000..c435b5d3903
--- /dev/null
+++ b/changes/7881.bugfix
@@ -0,0 +1 @@
+Empty string in `beaker.session.timeout` produces an error instead of never-expiring session
diff --git a/ckan/cli/shell.py b/ckan/cli/shell.py
index 7fffef529ca..29373020e95 100644
--- a/ckan/cli/shell.py
+++ b/ckan/cli/shell.py
@@ -28,7 +28,7 @@ def ipython(namespace: Mapping[str, Any], banner: str) -> None:
from traitlets.config.loader import Config
c = Config()
- c.TerminalInteractiveShell.banner2 = banner # type: ignore
+ c.TerminalInteractiveShell.banner2 = banner
IPython.start_ipython([], user_ns=namespace, config=c)
diff --git a/ckan/config/config_declaration.yaml b/ckan/config/config_declaration.yaml
index b73cc2d2cfb..1bf6376aa91 100644
--- a/ckan/config/config_declaration.yaml
+++ b/ckan/config/config_declaration.yaml
@@ -312,8 +312,7 @@ groups:
browsers are instructed to not send the cookie over anything other than an SSL connection.
- key: beaker.session.timeout
- type: int
- default: 600
+ validators: int_validator
description: |
Seconds until the session is considered invalid, after which it will be ignored and invalidated.
This number is based on the time since the session was last accessed, not from when the session was created.
diff --git a/ckan/tests/config/test_sessions.py b/ckan/tests/config/test_sessions.py
index 1023a8d2b47..4fb87b3aeb4 100644
--- a/ckan/tests/config/test_sessions.py
+++ b/ckan/tests/config/test_sessions.py
@@ -82,3 +82,21 @@ def get_blueprint(self):
blueprint.add_url_rule(*rule)
return blueprint
+
+
+@pytest.mark.parametrize("timeout,normalized", [
+ (None, None),
+ ("", None),
+ ("123", 123),
+ ("1_000_000", 1_000_000),
+ ("-1", -1),
+])
+def test_beaker_session_timeout(
+ monkeypatch, ckan_config, make_app, timeout, normalized
+):
+ """Beaker timeout accepts `None`(never expires) and int(expires in
+ n-seconds) values.
+
+ """
+ monkeypatch.setitem(ckan_config, "beaker.session.timeout", timeout)
+ make_app()
|
googleapis__google-auth-library-python-1330 | impersonated_credentials.py should use UTC
ID tokens had an issue where the `fromtimestamp` API was used instead of `utcfromtimestamp`. Ref: https://github.com/googleapis/google-auth-library-python/issues/1323.
It appears that `impersonated_credentials.py` uses the same API, and is likely impacted by the same issue.
```
➜ rg "\.fromtimestamp" -g '!*test*'
google/auth/compute_engine/credentials.py
392: return id_token, datetime.datetime.fromtimestamp(payload["exp"])
google/auth/impersonated_credentials.py
457: self.expiry = datetime.fromtimestamp(jwt.decode(id_token, verify=False)["exp"])
```
`google/auth/impersonated_credentials.py` should be updated to use `utcfromtimestamp` instead of `fromtimestamp`.
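A quick illustration of the skew, using a made-up expiry value: a JWT `exp` claim is seconds since the Unix epoch in UTC, and `datetime.fromtimestamp()` converts that to *local* time, so the stored expiry is off by the machine's UTC offset whenever it is later compared against `utcnow()`:

```python
from datetime import datetime

exp = 1_700_000_000  # hypothetical "exp" claim (epoch seconds, UTC)

local = datetime.fromtimestamp(exp)   # naive local wall-clock time
utc = datetime.utcfromtimestamp(exp)  # naive UTC, comparable to utcnow()

# On any machine not running in UTC these differ by the UTC offset,
# making credentials appear to expire too early or too late.
print(local, utc, local - utc)
```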
| [
{
"content": "# Copyright 2018 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Google Cloud Impersonated credentials.\n\nThis module provides authentication for applications where local credentials\nimpersonates a remote service account using `IAM Credentials API`_.\n\nThis class can be used to impersonate a service account as long as the original\nCredential object has the \"Service Account Token Creator\" role on the target\nservice account.\n\n .. _IAM Credentials API:\n https://cloud.google.com/iam/credentials/reference/rest/\n\"\"\"\n\nimport base64\nimport copy\nfrom datetime import datetime\nimport json\n\nimport six\nfrom six.moves import http_client\n\nfrom google.auth import _helpers\nfrom google.auth import credentials\nfrom google.auth import exceptions\nfrom google.auth import jwt\nfrom google.auth import metrics\n\n_DEFAULT_TOKEN_LIFETIME_SECS = 3600 # 1 hour in seconds\n\n_IAM_SCOPE = [\"https://www.googleapis.com/auth/iam\"]\n\n_IAM_ENDPOINT = (\n \"https://iamcredentials.googleapis.com/v1/projects/-\"\n + \"/serviceAccounts/{}:generateAccessToken\"\n)\n\n_IAM_SIGN_ENDPOINT = (\n \"https://iamcredentials.googleapis.com/v1/projects/-\"\n + \"/serviceAccounts/{}:signBlob\"\n)\n\n_IAM_IDTOKEN_ENDPOINT = (\n \"https://iamcredentials.googleapis.com/v1/\"\n + \"projects/-/serviceAccounts/{}:generateIdToken\"\n)\n\n_REFRESH_ERROR = \"Unable to acquire impersonated credentials\"\n\n_DEFAULT_TOKEN_LIFETIME_SECS = 3600 # 1 hour in seconds\n\n_DEFAULT_TOKEN_URI = \"https://oauth2.googleapis.com/token\"\n\n\ndef _make_iam_token_request(\n request, principal, headers, body, iam_endpoint_override=None\n):\n \"\"\"Makes a request to the Google Cloud IAM service for an access token.\n Args:\n request (Request): The Request object to use.\n principal (str): The principal to request an access token for.\n headers (Mapping[str, str]): Map of headers to transmit.\n body (Mapping[str, str]): JSON Payload body for the iamcredentials\n API call.\n iam_endpoint_override (Optiona[str]): The full IAM endpoint override\n with the target_principal embedded. This is useful when supporting\n impersonation with regional endpoints.\n\n Raises:\n google.auth.exceptions.TransportError: Raised if there is an underlying\n HTTP connection error\n google.auth.exceptions.RefreshError: Raised if the impersonated\n credentials are not available. 
Common reasons are\n `iamcredentials.googleapis.com` is not enabled or the\n `Service Account Token Creator` is not assigned\n \"\"\"\n iam_endpoint = iam_endpoint_override or _IAM_ENDPOINT.format(principal)\n\n body = json.dumps(body).encode(\"utf-8\")\n\n response = request(url=iam_endpoint, method=\"POST\", headers=headers, body=body)\n\n # support both string and bytes type response.data\n response_body = (\n response.data.decode(\"utf-8\")\n if hasattr(response.data, \"decode\")\n else response.data\n )\n\n if response.status != http_client.OK:\n raise exceptions.RefreshError(_REFRESH_ERROR, response_body)\n\n try:\n token_response = json.loads(response_body)\n token = token_response[\"accessToken\"]\n expiry = datetime.strptime(token_response[\"expireTime\"], \"%Y-%m-%dT%H:%M:%SZ\")\n\n return token, expiry\n\n except (KeyError, ValueError) as caught_exc:\n new_exc = exceptions.RefreshError(\n \"{}: No access token or invalid expiration in response.\".format(\n _REFRESH_ERROR\n ),\n response_body,\n )\n six.raise_from(new_exc, caught_exc)\n\n\nclass Credentials(\n credentials.Scoped, credentials.CredentialsWithQuotaProject, credentials.Signing\n):\n \"\"\"This module defines impersonated credentials which are essentially\n impersonated identities.\n\n Impersonated Credentials allows credentials issued to a user or\n service account to impersonate another. The target service account must\n grant the originating credential principal the\n `Service Account Token Creator`_ IAM role:\n\n For more information about Token Creator IAM role and\n IAMCredentials API, see\n `Creating Short-Lived Service Account Credentials`_.\n\n .. _Service Account Token Creator:\n https://cloud.google.com/iam/docs/service-accounts#the_service_account_token_creator_role\n\n .. _Creating Short-Lived Service Account Credentials:\n https://cloud.google.com/iam/docs/creating-short-lived-service-account-credentials\n\n Usage:\n\n First grant source_credentials the `Service Account Token Creator`\n role on the target account to impersonate. 
In this example, the\n service account represented by svc_account.json has the\n token creator role on\n `impersonated-account@_project_.iam.gserviceaccount.com`.\n\n Enable the IAMCredentials API on the source project:\n `gcloud services enable iamcredentials.googleapis.com`.\n\n Initialize a source credential which does not have access to\n list bucket::\n\n from google.oauth2 import service_account\n\n target_scopes = [\n 'https://www.googleapis.com/auth/devstorage.read_only']\n\n source_credentials = (\n service_account.Credentials.from_service_account_file(\n '/path/to/svc_account.json',\n scopes=target_scopes))\n\n Now use the source credentials to acquire credentials to impersonate\n another service account::\n\n from google.auth import impersonated_credentials\n\n target_credentials = impersonated_credentials.Credentials(\n source_credentials=source_credentials,\n target_principal='impersonated-account@_project_.iam.gserviceaccount.com',\n target_scopes = target_scopes,\n lifetime=500)\n\n Resource access is granted::\n\n client = storage.Client(credentials=target_credentials)\n buckets = client.list_buckets(project='your_project')\n for bucket in buckets:\n print(bucket.name)\n \"\"\"\n\n def __init__(\n self,\n source_credentials,\n target_principal,\n target_scopes,\n delegates=None,\n lifetime=_DEFAULT_TOKEN_LIFETIME_SECS,\n quota_project_id=None,\n iam_endpoint_override=None,\n ):\n \"\"\"\n Args:\n source_credentials (google.auth.Credentials): The source credential\n used as to acquire the impersonated credentials.\n target_principal (str): The service account to impersonate.\n target_scopes (Sequence[str]): Scopes to request during the\n authorization grant.\n delegates (Sequence[str]): The chained list of delegates required\n to grant the final access_token. If set, the sequence of\n identities must have \"Service Account Token Creator\" capability\n granted to the prceeding identity. For example, if set to\n [serviceAccountB, serviceAccountC], the source_credential\n must have the Token Creator role on serviceAccountB.\n serviceAccountB must have the Token Creator on\n serviceAccountC.\n Finally, C must have Token Creator on target_principal.\n If left unset, source_credential must have that role on\n target_principal.\n lifetime (int): Number of seconds the delegated credential should\n be valid for (upto 3600).\n quota_project_id (Optional[str]): The project ID used for quota and billing.\n This project may be different from the project used to\n create the credentials.\n iam_endpoint_override (Optiona[str]): The full IAM endpoint override\n with the target_principal embedded. This is useful when supporting\n impersonation with regional endpoints.\n \"\"\"\n\n super(Credentials, self).__init__()\n\n self._source_credentials = copy.copy(source_credentials)\n # Service account source credentials must have the _IAM_SCOPE\n # added to refresh correctly. 
User credentials cannot have\n # their original scopes modified.\n if isinstance(self._source_credentials, credentials.Scoped):\n self._source_credentials = self._source_credentials.with_scopes(_IAM_SCOPE)\n self._target_principal = target_principal\n self._target_scopes = target_scopes\n self._delegates = delegates\n self._lifetime = lifetime or _DEFAULT_TOKEN_LIFETIME_SECS\n self.token = None\n self.expiry = _helpers.utcnow()\n self._quota_project_id = quota_project_id\n self._iam_endpoint_override = iam_endpoint_override\n\n def _metric_header_for_usage(self):\n return metrics.CRED_TYPE_SA_IMPERSONATE\n\n @_helpers.copy_docstring(credentials.Credentials)\n def refresh(self, request):\n self._update_token(request)\n\n def _update_token(self, request):\n \"\"\"Updates credentials with a new access_token representing\n the impersonated account.\n\n Args:\n request (google.auth.transport.requests.Request): Request object\n to use for refreshing credentials.\n \"\"\"\n\n # Refresh our source credentials if it is not valid.\n if not self._source_credentials.valid:\n self._source_credentials.refresh(request)\n\n body = {\n \"delegates\": self._delegates,\n \"scope\": self._target_scopes,\n \"lifetime\": str(self._lifetime) + \"s\",\n }\n\n headers = {\n \"Content-Type\": \"application/json\",\n metrics.API_CLIENT_HEADER: metrics.token_request_access_token_impersonate(),\n }\n\n # Apply the source credentials authentication info.\n self._source_credentials.apply(headers)\n\n self.token, self.expiry = _make_iam_token_request(\n request=request,\n principal=self._target_principal,\n headers=headers,\n body=body,\n iam_endpoint_override=self._iam_endpoint_override,\n )\n\n def sign_bytes(self, message):\n from google.auth.transport.requests import AuthorizedSession\n\n iam_sign_endpoint = _IAM_SIGN_ENDPOINT.format(self._target_principal)\n\n body = {\n \"payload\": base64.b64encode(message).decode(\"utf-8\"),\n \"delegates\": self._delegates,\n }\n\n headers = {\"Content-Type\": \"application/json\"}\n\n authed_session = AuthorizedSession(self._source_credentials)\n\n try:\n response = authed_session.post(\n url=iam_sign_endpoint, headers=headers, json=body\n )\n finally:\n authed_session.close()\n\n if response.status_code != http_client.OK:\n raise exceptions.TransportError(\n \"Error calling sign_bytes: {}\".format(response.json())\n )\n\n return base64.b64decode(response.json()[\"signedBlob\"])\n\n @property\n def signer_email(self):\n return self._target_principal\n\n @property\n def service_account_email(self):\n return self._target_principal\n\n @property\n def signer(self):\n return self\n\n @property\n def requires_scopes(self):\n return not self._target_scopes\n\n @_helpers.copy_docstring(credentials.CredentialsWithQuotaProject)\n def with_quota_project(self, quota_project_id):\n return self.__class__(\n self._source_credentials,\n target_principal=self._target_principal,\n target_scopes=self._target_scopes,\n delegates=self._delegates,\n lifetime=self._lifetime,\n quota_project_id=quota_project_id,\n iam_endpoint_override=self._iam_endpoint_override,\n )\n\n @_helpers.copy_docstring(credentials.Scoped)\n def with_scopes(self, scopes, default_scopes=None):\n return self.__class__(\n self._source_credentials,\n target_principal=self._target_principal,\n target_scopes=scopes or default_scopes,\n delegates=self._delegates,\n lifetime=self._lifetime,\n quota_project_id=self._quota_project_id,\n iam_endpoint_override=self._iam_endpoint_override,\n )\n\n\nclass 
IDTokenCredentials(credentials.CredentialsWithQuotaProject):\n \"\"\"Open ID Connect ID Token-based service account credentials.\n\n \"\"\"\n\n def __init__(\n self,\n target_credentials,\n target_audience=None,\n include_email=False,\n quota_project_id=None,\n ):\n \"\"\"\n Args:\n target_credentials (google.auth.Credentials): The target\n credential used as to acquire the id tokens for.\n target_audience (string): Audience to issue the token for.\n include_email (bool): Include email in IdToken\n quota_project_id (Optional[str]): The project ID used for\n quota and billing.\n \"\"\"\n super(IDTokenCredentials, self).__init__()\n\n if not isinstance(target_credentials, Credentials):\n raise exceptions.GoogleAuthError(\n \"Provided Credential must be \" \"impersonated_credentials\"\n )\n self._target_credentials = target_credentials\n self._target_audience = target_audience\n self._include_email = include_email\n self._quota_project_id = quota_project_id\n\n def from_credentials(self, target_credentials, target_audience=None):\n return self.__class__(\n target_credentials=target_credentials,\n target_audience=target_audience,\n include_email=self._include_email,\n quota_project_id=self._quota_project_id,\n )\n\n def with_target_audience(self, target_audience):\n return self.__class__(\n target_credentials=self._target_credentials,\n target_audience=target_audience,\n include_email=self._include_email,\n quota_project_id=self._quota_project_id,\n )\n\n def with_include_email(self, include_email):\n return self.__class__(\n target_credentials=self._target_credentials,\n target_audience=self._target_audience,\n include_email=include_email,\n quota_project_id=self._quota_project_id,\n )\n\n @_helpers.copy_docstring(credentials.CredentialsWithQuotaProject)\n def with_quota_project(self, quota_project_id):\n return self.__class__(\n target_credentials=self._target_credentials,\n target_audience=self._target_audience,\n include_email=self._include_email,\n quota_project_id=quota_project_id,\n )\n\n @_helpers.copy_docstring(credentials.Credentials)\n def refresh(self, request):\n from google.auth.transport.requests import AuthorizedSession\n\n iam_sign_endpoint = _IAM_IDTOKEN_ENDPOINT.format(\n self._target_credentials.signer_email\n )\n\n body = {\n \"audience\": self._target_audience,\n \"delegates\": self._target_credentials._delegates,\n \"includeEmail\": self._include_email,\n }\n\n headers = {\n \"Content-Type\": \"application/json\",\n metrics.API_CLIENT_HEADER: metrics.token_request_id_token_impersonate(),\n }\n\n authed_session = AuthorizedSession(\n self._target_credentials._source_credentials, auth_request=request\n )\n\n try:\n response = authed_session.post(\n url=iam_sign_endpoint,\n headers=headers,\n data=json.dumps(body).encode(\"utf-8\"),\n )\n finally:\n authed_session.close()\n\n if response.status_code != http_client.OK:\n raise exceptions.RefreshError(\n \"Error getting ID token: {}\".format(response.json())\n )\n\n id_token = response.json()[\"token\"]\n self.token = id_token\n self.expiry = datetime.fromtimestamp(jwt.decode(id_token, verify=False)[\"exp\"])\n",
"path": "google/auth/impersonated_credentials.py"
}
] | [
{
"content": "# Copyright 2018 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Google Cloud Impersonated credentials.\n\nThis module provides authentication for applications where local credentials\nimpersonates a remote service account using `IAM Credentials API`_.\n\nThis class can be used to impersonate a service account as long as the original\nCredential object has the \"Service Account Token Creator\" role on the target\nservice account.\n\n .. _IAM Credentials API:\n https://cloud.google.com/iam/credentials/reference/rest/\n\"\"\"\n\nimport base64\nimport copy\nfrom datetime import datetime\nimport json\n\nimport six\nfrom six.moves import http_client\n\nfrom google.auth import _helpers\nfrom google.auth import credentials\nfrom google.auth import exceptions\nfrom google.auth import jwt\nfrom google.auth import metrics\n\n_DEFAULT_TOKEN_LIFETIME_SECS = 3600 # 1 hour in seconds\n\n_IAM_SCOPE = [\"https://www.googleapis.com/auth/iam\"]\n\n_IAM_ENDPOINT = (\n \"https://iamcredentials.googleapis.com/v1/projects/-\"\n + \"/serviceAccounts/{}:generateAccessToken\"\n)\n\n_IAM_SIGN_ENDPOINT = (\n \"https://iamcredentials.googleapis.com/v1/projects/-\"\n + \"/serviceAccounts/{}:signBlob\"\n)\n\n_IAM_IDTOKEN_ENDPOINT = (\n \"https://iamcredentials.googleapis.com/v1/\"\n + \"projects/-/serviceAccounts/{}:generateIdToken\"\n)\n\n_REFRESH_ERROR = \"Unable to acquire impersonated credentials\"\n\n_DEFAULT_TOKEN_LIFETIME_SECS = 3600 # 1 hour in seconds\n\n_DEFAULT_TOKEN_URI = \"https://oauth2.googleapis.com/token\"\n\n\ndef _make_iam_token_request(\n request, principal, headers, body, iam_endpoint_override=None\n):\n \"\"\"Makes a request to the Google Cloud IAM service for an access token.\n Args:\n request (Request): The Request object to use.\n principal (str): The principal to request an access token for.\n headers (Mapping[str, str]): Map of headers to transmit.\n body (Mapping[str, str]): JSON Payload body for the iamcredentials\n API call.\n iam_endpoint_override (Optiona[str]): The full IAM endpoint override\n with the target_principal embedded. This is useful when supporting\n impersonation with regional endpoints.\n\n Raises:\n google.auth.exceptions.TransportError: Raised if there is an underlying\n HTTP connection error\n google.auth.exceptions.RefreshError: Raised if the impersonated\n credentials are not available. 
Common reasons are\n `iamcredentials.googleapis.com` is not enabled or the\n `Service Account Token Creator` is not assigned\n \"\"\"\n iam_endpoint = iam_endpoint_override or _IAM_ENDPOINT.format(principal)\n\n body = json.dumps(body).encode(\"utf-8\")\n\n response = request(url=iam_endpoint, method=\"POST\", headers=headers, body=body)\n\n # support both string and bytes type response.data\n response_body = (\n response.data.decode(\"utf-8\")\n if hasattr(response.data, \"decode\")\n else response.data\n )\n\n if response.status != http_client.OK:\n raise exceptions.RefreshError(_REFRESH_ERROR, response_body)\n\n try:\n token_response = json.loads(response_body)\n token = token_response[\"accessToken\"]\n expiry = datetime.strptime(token_response[\"expireTime\"], \"%Y-%m-%dT%H:%M:%SZ\")\n\n return token, expiry\n\n except (KeyError, ValueError) as caught_exc:\n new_exc = exceptions.RefreshError(\n \"{}: No access token or invalid expiration in response.\".format(\n _REFRESH_ERROR\n ),\n response_body,\n )\n six.raise_from(new_exc, caught_exc)\n\n\nclass Credentials(\n credentials.Scoped, credentials.CredentialsWithQuotaProject, credentials.Signing\n):\n \"\"\"This module defines impersonated credentials which are essentially\n impersonated identities.\n\n Impersonated Credentials allows credentials issued to a user or\n service account to impersonate another. The target service account must\n grant the originating credential principal the\n `Service Account Token Creator`_ IAM role:\n\n For more information about Token Creator IAM role and\n IAMCredentials API, see\n `Creating Short-Lived Service Account Credentials`_.\n\n .. _Service Account Token Creator:\n https://cloud.google.com/iam/docs/service-accounts#the_service_account_token_creator_role\n\n .. _Creating Short-Lived Service Account Credentials:\n https://cloud.google.com/iam/docs/creating-short-lived-service-account-credentials\n\n Usage:\n\n First grant source_credentials the `Service Account Token Creator`\n role on the target account to impersonate. 
In this example, the\n service account represented by svc_account.json has the\n token creator role on\n `impersonated-account@_project_.iam.gserviceaccount.com`.\n\n Enable the IAMCredentials API on the source project:\n `gcloud services enable iamcredentials.googleapis.com`.\n\n Initialize a source credential which does not have access to\n list bucket::\n\n from google.oauth2 import service_account\n\n target_scopes = [\n 'https://www.googleapis.com/auth/devstorage.read_only']\n\n source_credentials = (\n service_account.Credentials.from_service_account_file(\n '/path/to/svc_account.json',\n scopes=target_scopes))\n\n Now use the source credentials to acquire credentials to impersonate\n another service account::\n\n from google.auth import impersonated_credentials\n\n target_credentials = impersonated_credentials.Credentials(\n source_credentials=source_credentials,\n target_principal='impersonated-account@_project_.iam.gserviceaccount.com',\n target_scopes = target_scopes,\n lifetime=500)\n\n Resource access is granted::\n\n client = storage.Client(credentials=target_credentials)\n buckets = client.list_buckets(project='your_project')\n for bucket in buckets:\n print(bucket.name)\n \"\"\"\n\n def __init__(\n self,\n source_credentials,\n target_principal,\n target_scopes,\n delegates=None,\n lifetime=_DEFAULT_TOKEN_LIFETIME_SECS,\n quota_project_id=None,\n iam_endpoint_override=None,\n ):\n \"\"\"\n Args:\n source_credentials (google.auth.Credentials): The source credential\n used as to acquire the impersonated credentials.\n target_principal (str): The service account to impersonate.\n target_scopes (Sequence[str]): Scopes to request during the\n authorization grant.\n delegates (Sequence[str]): The chained list of delegates required\n to grant the final access_token. If set, the sequence of\n identities must have \"Service Account Token Creator\" capability\n granted to the prceeding identity. For example, if set to\n [serviceAccountB, serviceAccountC], the source_credential\n must have the Token Creator role on serviceAccountB.\n serviceAccountB must have the Token Creator on\n serviceAccountC.\n Finally, C must have Token Creator on target_principal.\n If left unset, source_credential must have that role on\n target_principal.\n lifetime (int): Number of seconds the delegated credential should\n be valid for (upto 3600).\n quota_project_id (Optional[str]): The project ID used for quota and billing.\n This project may be different from the project used to\n create the credentials.\n iam_endpoint_override (Optiona[str]): The full IAM endpoint override\n with the target_principal embedded. This is useful when supporting\n impersonation with regional endpoints.\n \"\"\"\n\n super(Credentials, self).__init__()\n\n self._source_credentials = copy.copy(source_credentials)\n # Service account source credentials must have the _IAM_SCOPE\n # added to refresh correctly. 
User credentials cannot have\n # their original scopes modified.\n if isinstance(self._source_credentials, credentials.Scoped):\n self._source_credentials = self._source_credentials.with_scopes(_IAM_SCOPE)\n self._target_principal = target_principal\n self._target_scopes = target_scopes\n self._delegates = delegates\n self._lifetime = lifetime or _DEFAULT_TOKEN_LIFETIME_SECS\n self.token = None\n self.expiry = _helpers.utcnow()\n self._quota_project_id = quota_project_id\n self._iam_endpoint_override = iam_endpoint_override\n\n def _metric_header_for_usage(self):\n return metrics.CRED_TYPE_SA_IMPERSONATE\n\n @_helpers.copy_docstring(credentials.Credentials)\n def refresh(self, request):\n self._update_token(request)\n\n def _update_token(self, request):\n \"\"\"Updates credentials with a new access_token representing\n the impersonated account.\n\n Args:\n request (google.auth.transport.requests.Request): Request object\n to use for refreshing credentials.\n \"\"\"\n\n # Refresh our source credentials if it is not valid.\n if not self._source_credentials.valid:\n self._source_credentials.refresh(request)\n\n body = {\n \"delegates\": self._delegates,\n \"scope\": self._target_scopes,\n \"lifetime\": str(self._lifetime) + \"s\",\n }\n\n headers = {\n \"Content-Type\": \"application/json\",\n metrics.API_CLIENT_HEADER: metrics.token_request_access_token_impersonate(),\n }\n\n # Apply the source credentials authentication info.\n self._source_credentials.apply(headers)\n\n self.token, self.expiry = _make_iam_token_request(\n request=request,\n principal=self._target_principal,\n headers=headers,\n body=body,\n iam_endpoint_override=self._iam_endpoint_override,\n )\n\n def sign_bytes(self, message):\n from google.auth.transport.requests import AuthorizedSession\n\n iam_sign_endpoint = _IAM_SIGN_ENDPOINT.format(self._target_principal)\n\n body = {\n \"payload\": base64.b64encode(message).decode(\"utf-8\"),\n \"delegates\": self._delegates,\n }\n\n headers = {\"Content-Type\": \"application/json\"}\n\n authed_session = AuthorizedSession(self._source_credentials)\n\n try:\n response = authed_session.post(\n url=iam_sign_endpoint, headers=headers, json=body\n )\n finally:\n authed_session.close()\n\n if response.status_code != http_client.OK:\n raise exceptions.TransportError(\n \"Error calling sign_bytes: {}\".format(response.json())\n )\n\n return base64.b64decode(response.json()[\"signedBlob\"])\n\n @property\n def signer_email(self):\n return self._target_principal\n\n @property\n def service_account_email(self):\n return self._target_principal\n\n @property\n def signer(self):\n return self\n\n @property\n def requires_scopes(self):\n return not self._target_scopes\n\n @_helpers.copy_docstring(credentials.CredentialsWithQuotaProject)\n def with_quota_project(self, quota_project_id):\n return self.__class__(\n self._source_credentials,\n target_principal=self._target_principal,\n target_scopes=self._target_scopes,\n delegates=self._delegates,\n lifetime=self._lifetime,\n quota_project_id=quota_project_id,\n iam_endpoint_override=self._iam_endpoint_override,\n )\n\n @_helpers.copy_docstring(credentials.Scoped)\n def with_scopes(self, scopes, default_scopes=None):\n return self.__class__(\n self._source_credentials,\n target_principal=self._target_principal,\n target_scopes=scopes or default_scopes,\n delegates=self._delegates,\n lifetime=self._lifetime,\n quota_project_id=self._quota_project_id,\n iam_endpoint_override=self._iam_endpoint_override,\n )\n\n\nclass 
IDTokenCredentials(credentials.CredentialsWithQuotaProject):\n \"\"\"Open ID Connect ID Token-based service account credentials.\n\n \"\"\"\n\n def __init__(\n self,\n target_credentials,\n target_audience=None,\n include_email=False,\n quota_project_id=None,\n ):\n \"\"\"\n Args:\n target_credentials (google.auth.Credentials): The target\n credential used as to acquire the id tokens for.\n target_audience (string): Audience to issue the token for.\n include_email (bool): Include email in IdToken\n quota_project_id (Optional[str]): The project ID used for\n quota and billing.\n \"\"\"\n super(IDTokenCredentials, self).__init__()\n\n if not isinstance(target_credentials, Credentials):\n raise exceptions.GoogleAuthError(\n \"Provided Credential must be \" \"impersonated_credentials\"\n )\n self._target_credentials = target_credentials\n self._target_audience = target_audience\n self._include_email = include_email\n self._quota_project_id = quota_project_id\n\n def from_credentials(self, target_credentials, target_audience=None):\n return self.__class__(\n target_credentials=target_credentials,\n target_audience=target_audience,\n include_email=self._include_email,\n quota_project_id=self._quota_project_id,\n )\n\n def with_target_audience(self, target_audience):\n return self.__class__(\n target_credentials=self._target_credentials,\n target_audience=target_audience,\n include_email=self._include_email,\n quota_project_id=self._quota_project_id,\n )\n\n def with_include_email(self, include_email):\n return self.__class__(\n target_credentials=self._target_credentials,\n target_audience=self._target_audience,\n include_email=include_email,\n quota_project_id=self._quota_project_id,\n )\n\n @_helpers.copy_docstring(credentials.CredentialsWithQuotaProject)\n def with_quota_project(self, quota_project_id):\n return self.__class__(\n target_credentials=self._target_credentials,\n target_audience=self._target_audience,\n include_email=self._include_email,\n quota_project_id=quota_project_id,\n )\n\n @_helpers.copy_docstring(credentials.Credentials)\n def refresh(self, request):\n from google.auth.transport.requests import AuthorizedSession\n\n iam_sign_endpoint = _IAM_IDTOKEN_ENDPOINT.format(\n self._target_credentials.signer_email\n )\n\n body = {\n \"audience\": self._target_audience,\n \"delegates\": self._target_credentials._delegates,\n \"includeEmail\": self._include_email,\n }\n\n headers = {\n \"Content-Type\": \"application/json\",\n metrics.API_CLIENT_HEADER: metrics.token_request_id_token_impersonate(),\n }\n\n authed_session = AuthorizedSession(\n self._target_credentials._source_credentials, auth_request=request\n )\n\n try:\n response = authed_session.post(\n url=iam_sign_endpoint,\n headers=headers,\n data=json.dumps(body).encode(\"utf-8\"),\n )\n finally:\n authed_session.close()\n\n if response.status_code != http_client.OK:\n raise exceptions.RefreshError(\n \"Error getting ID token: {}\".format(response.json())\n )\n\n id_token = response.json()[\"token\"]\n self.token = id_token\n self.expiry = datetime.utcfromtimestamp(\n jwt.decode(id_token, verify=False)[\"exp\"]\n )\n",
"path": "google/auth/impersonated_credentials.py"
}
] | diff --git a/google/auth/impersonated_credentials.py b/google/auth/impersonated_credentials.py
index 7c2f18d74..ba6012123 100644
--- a/google/auth/impersonated_credentials.py
+++ b/google/auth/impersonated_credentials.py
@@ -454,4 +454,6 @@ def refresh(self, request):
id_token = response.json()["token"]
self.token = id_token
- self.expiry = datetime.fromtimestamp(jwt.decode(id_token, verify=False)["exp"])
+ self.expiry = datetime.utcfromtimestamp(
+ jwt.decode(id_token, verify=False)["exp"]
+ )
diff --git a/tests/test_impersonated_credentials.py b/tests/test_impersonated_credentials.py
index 0c6ca0ce9..f79db8f6a 100644
--- a/tests/test_impersonated_credentials.py
+++ b/tests/test_impersonated_credentials.py
@@ -488,7 +488,7 @@ def test_id_token_success(
id_creds.refresh(request)
assert id_creds.token == ID_TOKEN_DATA
- assert id_creds.expiry == datetime.datetime.fromtimestamp(ID_TOKEN_EXPIRY)
+ assert id_creds.expiry == datetime.datetime.utcfromtimestamp(ID_TOKEN_EXPIRY)
def test_id_token_metrics(self, mock_donor_credentials):
credentials = self.make_credentials(lifetime=None)
@@ -512,7 +512,7 @@ def test_id_token_metrics(self, mock_donor_credentials):
id_creds.refresh(None)
assert id_creds.token == ID_TOKEN_DATA
- assert id_creds.expiry == datetime.datetime.fromtimestamp(
+ assert id_creds.expiry == datetime.datetime.utcfromtimestamp(
ID_TOKEN_EXPIRY
)
assert (
@@ -581,7 +581,7 @@ def test_id_token_with_target_audience(
id_creds.refresh(request)
assert id_creds.token == ID_TOKEN_DATA
- assert id_creds.expiry == datetime.datetime.fromtimestamp(ID_TOKEN_EXPIRY)
+ assert id_creds.expiry == datetime.datetime.utcfromtimestamp(ID_TOKEN_EXPIRY)
assert id_creds._include_email is True
def test_id_token_invalid_cred(
|
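Why the `utcfromtimestamp` change in the diff above is needed: the JWT `exp` claim is seconds since the Unix epoch (a UTC instant), while `datetime.fromtimestamp()` converts to *local* time. The credential initializes `expiry` from `_helpers.utcnow()` (a naive-UTC clock), so the expiry parsed from the ID token must be naive UTC as well. A standalone sketch (not google-auth code; the timestamp is arbitrary):

```python
from datetime import datetime

exp = 1700000000  # JWT "exp": seconds since the Unix epoch, a UTC instant

# fromtimestamp() applies the machine's local timezone and returns a
# naive datetime, so anywhere outside UTC the expiry is shifted by the
# local UTC offset.
local_naive = datetime.fromtimestamp(exp)

# utcfromtimestamp() returns naive UTC, directly comparable with the
# naive-UTC "now" the credentials use elsewhere.
utc_naive = datetime.utcfromtimestamp(exp)

print(local_naive, utc_naive)  # differ unless the local zone is UTC
```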
microsoft__DeepSpeed-5577 | [BUG] fp_quantizer is not correctly built with a non-JIT installation
**Describe the bug**
fp_quantizer is not correctly built with a non-JIT installation.
**To Reproduce**
Steps to reproduce the behavior:
```
DS_BUILD_FP_QUANTIZER=1 pip install deepspeed
```
The install will succeed, but
```
from deepspeed.ops.fp_quantizer import FP_Quantize
FP_Quantize()
```
will raise `ImportError: dynamic module does not define module export function (PyInit_fp_quantizer_op)`
**Expected behavior**
Renaming csrc/fp_quantizer/quantize.cu may solve the issue.
The following restriction seems to be the cause of the bug:
> Note that setuptools cannot handle files with the same name but different extensions
https://pytorch.org/tutorials/advanced/cpp_extension.html
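To make the quoted restriction concrete, here is a hypothetical minimal sketch (not DeepSpeed's actual build code): both sources share the stem `quantize`, so the build emits colliding object files and the finished module can end up without its `PyInit_fp_quantizer_op` entry point.

```python
# Hypothetical repro of the stem collision described above.
from setuptools import setup
from torch.utils.cpp_extension import BuildExtension, CUDAExtension

setup(
    name="fp_quantizer_repro",
    ext_modules=[
        CUDAExtension(
            name="fp_quantizer_op",
            sources=[
                "csrc/fp_quantizer/quantize.cpp",  # stem: "quantize"
                "csrc/fp_quantizer/quantize.cu",   # same stem -> the two
                # object files clobber each other during the build
            ],
        )
    ],
    cmdclass={"build_ext": BuildExtension},
)
```

Renaming the sources to `fp_quantize.cpp` / `fp_quantize.cu`, as the patch below does, gives every translation unit a unique stem.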
| [
{
"content": "# Copyright (c) Microsoft Corporation.\n# SPDX-License-Identifier: Apache-2.0\n\n# DeepSpeed Team\n\nfrom .builder import CUDAOpBuilder, installed_cuda_version\n\n\nclass FPQuantizerBuilder(CUDAOpBuilder):\n BUILD_VAR = \"DS_BUILD_FP_QUANTIZER\"\n NAME = \"fp_quantizer\"\n\n def __init__(self, name=None):\n name = self.NAME if name is None else name\n super().__init__(name=name)\n\n def absolute_name(self):\n return f'deepspeed.ops.fp_quantizer.{self.NAME}_op'\n\n def is_compatible(self, verbose=True):\n try:\n import torch\n except ImportError:\n self.warning(\"Please install torch if trying to pre-compile inference kernels\")\n return False\n\n cuda_okay = True\n if not self.is_rocm_pytorch() and torch.cuda.is_available(): #ignore-cuda\n sys_cuda_major, _ = installed_cuda_version()\n torch_cuda_major = int(torch.version.cuda.split('.')[0])\n cuda_capability = torch.cuda.get_device_properties(0).major #ignore-cuda\n if cuda_capability < 8:\n self.warning(\"NVIDIA Inference is only supported on Ampere and newer architectures\")\n cuda_okay = False\n if cuda_capability >= 8:\n if torch_cuda_major < 11 or sys_cuda_major < 11:\n self.warning(\"On Ampere and higher architectures please use CUDA 11+\")\n cuda_okay = False\n return super().is_compatible(verbose) and cuda_okay\n\n def filter_ccs(self, ccs):\n ccs_retained = []\n ccs_pruned = []\n for cc in ccs:\n if int(cc[0]) >= 8:\n ccs_retained.append(cc)\n else:\n ccs_pruned.append(cc)\n if len(ccs_pruned) > 0:\n self.warning(f\"Filtered compute capabilities {ccs_pruned}\")\n return ccs_retained\n\n def sources(self):\n return [\n \"csrc/fp_quantizer/quantize.cu\",\n \"csrc/fp_quantizer/quantize.cpp\",\n ]\n\n def extra_ldflags(self):\n return ['-lcurand']\n\n def include_paths(self):\n return ['csrc/fp_quantizer/includes', 'csrc/includes']\n",
"path": "op_builder/fp_quantizer.py"
}
] | [
{
"content": "# Copyright (c) Microsoft Corporation.\n# SPDX-License-Identifier: Apache-2.0\n\n# DeepSpeed Team\n\nfrom .builder import CUDAOpBuilder, installed_cuda_version\n\n\nclass FPQuantizerBuilder(CUDAOpBuilder):\n BUILD_VAR = \"DS_BUILD_FP_QUANTIZER\"\n NAME = \"fp_quantizer\"\n\n def __init__(self, name=None):\n name = self.NAME if name is None else name\n super().__init__(name=name)\n\n def absolute_name(self):\n return f'deepspeed.ops.fp_quantizer.{self.NAME}_op'\n\n def is_compatible(self, verbose=True):\n try:\n import torch\n except ImportError:\n self.warning(\"Please install torch if trying to pre-compile inference kernels\")\n return False\n\n cuda_okay = True\n if not self.is_rocm_pytorch() and torch.cuda.is_available(): #ignore-cuda\n sys_cuda_major, _ = installed_cuda_version()\n torch_cuda_major = int(torch.version.cuda.split('.')[0])\n cuda_capability = torch.cuda.get_device_properties(0).major #ignore-cuda\n if cuda_capability < 8:\n self.warning(\"NVIDIA Inference is only supported on Ampere and newer architectures\")\n cuda_okay = False\n if cuda_capability >= 8:\n if torch_cuda_major < 11 or sys_cuda_major < 11:\n self.warning(\"On Ampere and higher architectures please use CUDA 11+\")\n cuda_okay = False\n return super().is_compatible(verbose) and cuda_okay\n\n def filter_ccs(self, ccs):\n ccs_retained = []\n ccs_pruned = []\n for cc in ccs:\n if int(cc[0]) >= 8:\n ccs_retained.append(cc)\n else:\n ccs_pruned.append(cc)\n if len(ccs_pruned) > 0:\n self.warning(f\"Filtered compute capabilities {ccs_pruned}\")\n return ccs_retained\n\n def sources(self):\n return [\n \"csrc/fp_quantizer/fp_quantize.cu\",\n \"csrc/fp_quantizer/fp_quantize.cpp\",\n ]\n\n def extra_ldflags(self):\n return ['-lcurand']\n\n def include_paths(self):\n return ['csrc/fp_quantizer/includes', 'csrc/includes']\n",
"path": "op_builder/fp_quantizer.py"
}
] | diff --git a/csrc/fp_quantizer/quantize.cpp b/csrc/fp_quantizer/fp_quantize.cpp
similarity index 99%
rename from csrc/fp_quantizer/quantize.cpp
rename to csrc/fp_quantizer/fp_quantize.cpp
index ec631c576e27..6962b8050f51 100644
--- a/csrc/fp_quantizer/quantize.cpp
+++ b/csrc/fp_quantizer/fp_quantize.cpp
@@ -3,7 +3,7 @@
// DeepSpeed Team
-#include "quantize.h"
+#include "fp_quantize.h"
#include <c10/cuda/CUDAStream.h>
#include <torch/extension.h>
diff --git a/csrc/fp_quantizer/quantize.cu b/csrc/fp_quantizer/fp_quantize.cu
similarity index 99%
rename from csrc/fp_quantizer/quantize.cu
rename to csrc/fp_quantizer/fp_quantize.cu
index 5ada6894747f..cca063956167 100644
--- a/csrc/fp_quantizer/quantize.cu
+++ b/csrc/fp_quantizer/fp_quantize.cu
@@ -5,8 +5,8 @@
#include <stdexcept>
#include "context.h"
+#include "fp_quantize.h"
#include "memory_access_utils.h"
-#include "quantize.h"
#include "reduction_utils.h"
#include <cuda.h>
diff --git a/csrc/fp_quantizer/includes/quantize.h b/csrc/fp_quantizer/includes/fp_quantize.h
similarity index 100%
rename from csrc/fp_quantizer/includes/quantize.h
rename to csrc/fp_quantizer/includes/fp_quantize.h
diff --git a/op_builder/fp_quantizer.py b/op_builder/fp_quantizer.py
index bafd3e0c33f6..9f07ec3d1275 100644
--- a/op_builder/fp_quantizer.py
+++ b/op_builder/fp_quantizer.py
@@ -52,8 +52,8 @@ def filter_ccs(self, ccs):
def sources(self):
return [
- "csrc/fp_quantizer/quantize.cu",
- "csrc/fp_quantizer/quantize.cpp",
+ "csrc/fp_quantizer/fp_quantize.cu",
+ "csrc/fp_quantizer/fp_quantize.cpp",
]
def extra_ldflags(self):
|
nipy__nipype-2182 | Modelgen SpecifyModel TypeError: apply_along_axis()
@effigies
SpecifyModel raises an error with the new function from
commit cd49748be5d7a8201496548922d85f63bb4034dc
modelgen.py, lines 399ff.
With numpy 1.8.2 I get:
TypeError: apply_along_axis() got an unexpected keyword argument 'source'
Interface SpecifyModel failed to run
Joerg
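For reference, a minimal sketch of the version dependency (the function and keyword below are toy stand-ins mirroring the traceback, not the actual modelgen code): forwarding extra keyword arguments through `np.apply_along_axis` to `func1d` requires numpy >= 1.9, which is exactly what the version bump in the patch below enforces.

```python
import numpy as np

def func1d(row, source=1.0):
    # toy per-row function; "source" mirrors the keyword in the traceback
    return row * source

arr = np.arange(6).reshape(2, 3)

# numpy >= 1.9 forwards **kwargs to func1d; numpy 1.8.2 raises
# TypeError: apply_along_axis() got an unexpected keyword argument 'source'
print(np.apply_along_axis(func1d, 1, arr, source=2.0))
```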
| [
{
"content": "\"\"\" This file contains defines parameters for nipy that we use to fill\nsettings in setup.py, the nipy top-level docstring, and for building the\ndocs. In setup.py in particular, we exec this file, so it cannot import nipy\n\"\"\"\nfrom __future__ import print_function, division, unicode_literals, absolute_import\n\nimport sys\n\n# nipype version information. An empty version_extra corresponds to a\n# full release. '.dev' as a version_extra string means this is a development\n# version\n# Remove -dev for release\n__version__ = '1.0.0-dev'\n\n\ndef get_nipype_gitversion():\n \"\"\"Nipype version as reported by the last commit in git\n\n Returns\n -------\n None or str\n Version of Nipype according to git.\n \"\"\"\n import os\n import subprocess\n try:\n import nipype\n gitpath = os.path.realpath(os.path.join(os.path.dirname(nipype.__file__),\n os.path.pardir))\n except:\n gitpath = os.getcwd()\n gitpathgit = os.path.join(gitpath, '.git')\n if not os.path.exists(gitpathgit):\n return None\n ver = None\n try:\n o, _ = subprocess.Popen('git describe', shell=True, cwd=gitpath,\n stdout=subprocess.PIPE).communicate()\n except Exception:\n pass\n else:\n ver = o.decode().strip().split('-')[-1]\n return ver\n\nif __version__.endswith('-dev'):\n gitversion = get_nipype_gitversion()\n if gitversion:\n __version__ = '{}+{}'.format(__version__, gitversion)\n\nCLASSIFIERS = ['Development Status :: 5 - Production/Stable',\n 'Environment :: Console',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: Apache Software License',\n 'Operating System :: MacOS :: MacOS X',\n 'Operating System :: POSIX :: Linux',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Topic :: Scientific/Engineering']\n\ndescription = 'Neuroimaging in Python: Pipelines and Interfaces'\n\n# Note: this long_description is actually a copy/paste from the top-level\n# README.txt, so that it shows up nicely on PyPI. So please remember to edit\n# it only in one place and sync it correctly.\nlong_description = \"\"\"========================================================\nNIPYPE: Neuroimaging in Python: Pipelines and Interfaces\n========================================================\n\nCurrent neuroimaging software offer users an incredible opportunity to \\\nanalyze data using a variety of different algorithms. However, this has \\\nresulted in a heterogeneous collection of specialized applications \\\nwithout transparent interoperability or a uniform operating interface.\n\n*Nipype*, an open-source, community-developed initiative under the \\\numbrella of NiPy_, is a Python project that provides a uniform interface \\\nto existing neuroimaging software and facilitates interaction between \\\nthese packages within a single workflow. Nipype provides an environment \\\nthat encourages interactive exploration of algorithms from different \\\npackages (e.g., AFNI, ANTS, BRAINS, BrainSuite, Camino, FreeSurfer, FSL, MNE, \\\nMRtrix, MNE, Nipy, Slicer, SPM), eases the design of workflows within and \\\nbetween packages, and reduces the learning curve necessary to use different \\\npackages. 
Nipype is creating a collaborative platform for neuroimaging software \\\ndevelopment in a high-level language and addressing limitations of existing \\\npipeline systems.\n\n*Nipype* allows you to:\n\n* easily interact with tools from different software packages\n* combine processing steps from different software packages\n* develop new workflows faster by reusing common steps from old ones\n* process data faster by running it in parallel on many cores/machines\n* make your research easily reproducible\n* share your processing workflows with the community\n\"\"\"\n\n# versions\nNIBABEL_MIN_VERSION = '2.1.0'\nNETWORKX_MIN_VERSION = '1.9'\nNUMPY_MIN_VERSION = '1.8.2'\nSCIPY_MIN_VERSION = '0.14'\nTRAITS_MIN_VERSION = '4.6'\nDATEUTIL_MIN_VERSION = '2.2'\nPYTEST_MIN_VERSION = '3.0'\nFUTURE_MIN_VERSION = '0.16.0'\nSIMPLEJSON_MIN_VERSION = '3.8.0'\nPROV_VERSION = '1.5.0'\nCLICK_MIN_VERSION = '6.6.0'\n\nNAME = 'nipype'\nMAINTAINER = 'nipype developers'\nMAINTAINER_EMAIL = '[email protected]'\nDESCRIPTION = description\nLONG_DESCRIPTION = long_description\nURL = 'http://nipy.org/nipype'\nDOWNLOAD_URL = 'http://github.com/nipy/nipype/archives/master'\nLICENSE = 'Apache License, 2.0'\nCLASSIFIERS = CLASSIFIERS\nAUTHOR = 'nipype developers'\nAUTHOR_EMAIL = '[email protected]'\nPLATFORMS = 'OS Independent'\nMAJOR = __version__.split('.')[0]\nMINOR = __version__.split('.')[1]\nMICRO = __version__.replace('-', '.').split('.')[2]\nISRELEASE = (len(__version__.replace('-', '.').split('.')) == 3 or\n 'post' in __version__.replace('-', '.').split('.')[-1])\nVERSION = __version__\nPROVIDES = ['nipype']\nREQUIRES = [\n 'nibabel>=%s' % NIBABEL_MIN_VERSION,\n 'networkx>=%s' % NETWORKX_MIN_VERSION,\n 'numpy>=%s' % NUMPY_MIN_VERSION,\n 'python-dateutil>=%s' % DATEUTIL_MIN_VERSION,\n 'scipy>=%s' % SCIPY_MIN_VERSION,\n 'traits>=%s' % TRAITS_MIN_VERSION,\n 'future>=%s' % FUTURE_MIN_VERSION,\n 'simplejson>=%s' % SIMPLEJSON_MIN_VERSION,\n 'prov==%s' % PROV_VERSION,\n 'click>=%s' % CLICK_MIN_VERSION,\n 'funcsigs',\n 'pytest>=%s' % PYTEST_MIN_VERSION,\n 'mock',\n 'pydotplus',\n 'packaging',\n]\n\nif sys.version_info <= (3, 4):\n REQUIRES.append('configparser')\n\nTESTS_REQUIRES = [\n 'pytest-cov',\n 'codecov'\n]\n\nEXTRA_REQUIRES = {\n 'doc': ['Sphinx>=1.4', 'matplotlib', 'pydotplus'],\n 'tests': TESTS_REQUIRES,\n 'nipy': ['nitime', 'nilearn', 'dipy', 'nipy', 'matplotlib'],\n 'profiler': ['psutil'],\n 'duecredit': ['duecredit'],\n 'xvfbwrapper': ['xvfbwrapper'],\n # 'mesh': ['mayavi'] # Enable when it works\n}\n\n# Enable a handle to install all extra dependencies at once\nEXTRA_REQUIRES['all'] = [val for _, val in list(EXTRA_REQUIRES.items())]\n\nSTATUS = 'stable'\n",
"path": "nipype/info.py"
}
] | [
{
"content": "\"\"\" This file contains defines parameters for nipy that we use to fill\nsettings in setup.py, the nipy top-level docstring, and for building the\ndocs. In setup.py in particular, we exec this file, so it cannot import nipy\n\"\"\"\nfrom __future__ import print_function, division, unicode_literals, absolute_import\n\nimport sys\n\n# nipype version information. An empty version_extra corresponds to a\n# full release. '.dev' as a version_extra string means this is a development\n# version\n# Remove -dev for release\n__version__ = '1.0.0-dev'\n\n\ndef get_nipype_gitversion():\n \"\"\"Nipype version as reported by the last commit in git\n\n Returns\n -------\n None or str\n Version of Nipype according to git.\n \"\"\"\n import os\n import subprocess\n try:\n import nipype\n gitpath = os.path.realpath(os.path.join(os.path.dirname(nipype.__file__),\n os.path.pardir))\n except:\n gitpath = os.getcwd()\n gitpathgit = os.path.join(gitpath, '.git')\n if not os.path.exists(gitpathgit):\n return None\n ver = None\n try:\n o, _ = subprocess.Popen('git describe', shell=True, cwd=gitpath,\n stdout=subprocess.PIPE).communicate()\n except Exception:\n pass\n else:\n ver = o.decode().strip().split('-')[-1]\n return ver\n\nif __version__.endswith('-dev'):\n gitversion = get_nipype_gitversion()\n if gitversion:\n __version__ = '{}+{}'.format(__version__, gitversion)\n\nCLASSIFIERS = ['Development Status :: 5 - Production/Stable',\n 'Environment :: Console',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: Apache Software License',\n 'Operating System :: MacOS :: MacOS X',\n 'Operating System :: POSIX :: Linux',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Topic :: Scientific/Engineering']\n\ndescription = 'Neuroimaging in Python: Pipelines and Interfaces'\n\n# Note: this long_description is actually a copy/paste from the top-level\n# README.txt, so that it shows up nicely on PyPI. So please remember to edit\n# it only in one place and sync it correctly.\nlong_description = \"\"\"========================================================\nNIPYPE: Neuroimaging in Python: Pipelines and Interfaces\n========================================================\n\nCurrent neuroimaging software offer users an incredible opportunity to \\\nanalyze data using a variety of different algorithms. However, this has \\\nresulted in a heterogeneous collection of specialized applications \\\nwithout transparent interoperability or a uniform operating interface.\n\n*Nipype*, an open-source, community-developed initiative under the \\\numbrella of NiPy_, is a Python project that provides a uniform interface \\\nto existing neuroimaging software and facilitates interaction between \\\nthese packages within a single workflow. Nipype provides an environment \\\nthat encourages interactive exploration of algorithms from different \\\npackages (e.g., AFNI, ANTS, BRAINS, BrainSuite, Camino, FreeSurfer, FSL, MNE, \\\nMRtrix, MNE, Nipy, Slicer, SPM), eases the design of workflows within and \\\nbetween packages, and reduces the learning curve necessary to use different \\\npackages. 
Nipype is creating a collaborative platform for neuroimaging software \\\ndevelopment in a high-level language and addressing limitations of existing \\\npipeline systems.\n\n*Nipype* allows you to:\n\n* easily interact with tools from different software packages\n* combine processing steps from different software packages\n* develop new workflows faster by reusing common steps from old ones\n* process data faster by running it in parallel on many cores/machines\n* make your research easily reproducible\n* share your processing workflows with the community\n\"\"\"\n\n# versions\nNIBABEL_MIN_VERSION = '2.1.0'\nNETWORKX_MIN_VERSION = '1.9'\nNUMPY_MIN_VERSION = '1.9.0'\nSCIPY_MIN_VERSION = '0.14'\nTRAITS_MIN_VERSION = '4.6'\nDATEUTIL_MIN_VERSION = '2.2'\nPYTEST_MIN_VERSION = '3.0'\nFUTURE_MIN_VERSION = '0.16.0'\nSIMPLEJSON_MIN_VERSION = '3.8.0'\nPROV_VERSION = '1.5.0'\nCLICK_MIN_VERSION = '6.6.0'\n\nNAME = 'nipype'\nMAINTAINER = 'nipype developers'\nMAINTAINER_EMAIL = '[email protected]'\nDESCRIPTION = description\nLONG_DESCRIPTION = long_description\nURL = 'http://nipy.org/nipype'\nDOWNLOAD_URL = 'http://github.com/nipy/nipype/archives/master'\nLICENSE = 'Apache License, 2.0'\nCLASSIFIERS = CLASSIFIERS\nAUTHOR = 'nipype developers'\nAUTHOR_EMAIL = '[email protected]'\nPLATFORMS = 'OS Independent'\nMAJOR = __version__.split('.')[0]\nMINOR = __version__.split('.')[1]\nMICRO = __version__.replace('-', '.').split('.')[2]\nISRELEASE = (len(__version__.replace('-', '.').split('.')) == 3 or\n 'post' in __version__.replace('-', '.').split('.')[-1])\nVERSION = __version__\nPROVIDES = ['nipype']\nREQUIRES = [\n 'nibabel>=%s' % NIBABEL_MIN_VERSION,\n 'networkx>=%s' % NETWORKX_MIN_VERSION,\n 'numpy>=%s' % NUMPY_MIN_VERSION,\n 'python-dateutil>=%s' % DATEUTIL_MIN_VERSION,\n 'scipy>=%s' % SCIPY_MIN_VERSION,\n 'traits>=%s' % TRAITS_MIN_VERSION,\n 'future>=%s' % FUTURE_MIN_VERSION,\n 'simplejson>=%s' % SIMPLEJSON_MIN_VERSION,\n 'prov==%s' % PROV_VERSION,\n 'click>=%s' % CLICK_MIN_VERSION,\n 'funcsigs',\n 'pytest>=%s' % PYTEST_MIN_VERSION,\n 'mock',\n 'pydotplus',\n 'packaging',\n]\n\nif sys.version_info <= (3, 4):\n REQUIRES.append('configparser')\n\nTESTS_REQUIRES = [\n 'pytest-cov',\n 'codecov'\n]\n\nEXTRA_REQUIRES = {\n 'doc': ['Sphinx>=1.4', 'matplotlib', 'pydotplus'],\n 'tests': TESTS_REQUIRES,\n 'nipy': ['nitime', 'nilearn', 'dipy', 'nipy', 'matplotlib'],\n 'profiler': ['psutil'],\n 'duecredit': ['duecredit'],\n 'xvfbwrapper': ['xvfbwrapper'],\n # 'mesh': ['mayavi'] # Enable when it works\n}\n\n# Enable a handle to install all extra dependencies at once\nEXTRA_REQUIRES['all'] = [val for _, val in list(EXTRA_REQUIRES.items())]\n\nSTATUS = 'stable'\n",
"path": "nipype/info.py"
}
] | diff --git a/doc/devel/gitwash/known_projects.inc b/doc/devel/gitwash/known_projects.inc
index 2972352877..ce939b110e 100644
--- a/doc/devel/gitwash/known_projects.inc
+++ b/doc/devel/gitwash/known_projects.inc
@@ -6,7 +6,7 @@
.. _`PROJECTNAME mailing list`: http://projects.scipy.org/mailman/listinfo/nipy-devel
.. numpy
-.. _numpy: hhttp://numpy.scipy.org
+.. _numpy: http://numpy.scipy.org
.. _`numpy github`: http://github.com/numpy/numpy
.. _`numpy mailing list`: http://mail.scipy.org/mailman/listinfo/numpy-discussion
diff --git a/nipype/info.py b/nipype/info.py
index 9db9a02abd..4b416c6db3 100644
--- a/nipype/info.py
+++ b/nipype/info.py
@@ -98,7 +98,7 @@ def get_nipype_gitversion():
# versions
NIBABEL_MIN_VERSION = '2.1.0'
NETWORKX_MIN_VERSION = '1.9'
-NUMPY_MIN_VERSION = '1.8.2'
+NUMPY_MIN_VERSION = '1.9.0'
SCIPY_MIN_VERSION = '0.14'
TRAITS_MIN_VERSION = '4.6'
DATEUTIL_MIN_VERSION = '2.2'
diff --git a/requirements.txt b/requirements.txt
index bcd3ab2fef..a697b62244 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,4 +1,4 @@
-numpy>=1.6.2
+numpy>=1.9.0
scipy>=0.11
networkx>=1.7
traits>=4.6
|
nipy__nipype-3220 | [BUG] STDOUT of CommandLine is stored as one letter per line
### Summary
`STDOUT` from the `CommandLine` interface is stored as one **letter** per line in the `Terminal output` section of `_report/report.rst`.
### Actual behavior
```
T
h
i
s
i
s
m
y
o
u
t
p
u
t
```
### Expected behavior
`This is my output`
### How to replicate the behavior
```bash
cd /tmp
# save test.py and test2.py
export PYTHONPATH=/tmp
python test2.py
vim /tmp/stdoutError/list_content/_report/report.rst
```
### Script/Workflow details
<details><summary>test.py</summary>
<p>
```python
#!/usr/bin/env python
from nipype.interfaces.base import CommandLine, CommandLineInputSpec, TraitedSpec, Str
# ==================
class LsInputSpec(CommandLineInputSpec):
    in_path = Str(argstr='%s')


class LsOutputSpec(TraitedSpec):
    pass


class Ls(CommandLine):
    _cmd = 'ls'
    input_spec = LsInputSpec
    output_spec = LsOutputSpec

    def _list_outputs(self):
        pass
```
</p>
</details>
<details><summary>test2.py</summary>
<p>
```python
from nipype import Node, Workflow, SelectFiles
from test import Ls
# ==================
parent_directory = '/tmp/'
# ===============
templates = {'path': '/tmp/'}
select_files = Node(SelectFiles(templates), name='select_files')
# ===============
stdoutError = Workflow(name='stdoutError', base_dir=parent_directory)
ls_node = Node(Ls(), name='list_content')
stdoutError.connect(select_files, 'path', ls_node, 'in_path')
stdoutError.run()
```
</p>
</details>
### Platform details:
Output of `python -c "import nipype; from pprint import pprint; pprint(nipype.get_info())"`:
```
191119-20:05:34,389 nipype.utils INFO:
Running nipype version 1.2.3 (latest: 1.3.1)
{'commit_hash': 'a485cf60f',
'commit_source': 'installation',
'networkx_version': '2.2',
'nibabel_version': '2.3.0',
'nipype_version': '1.2.3',
'numpy_version': '1.16.2',
'pkg_path': '/home/tb571/miniconda3/lib/python3.6/site-packages/nipype',
'scipy_version': '1.2.1',
'sys_executable': '/home/tb571/miniconda3/bin/python',
'sys_platform': 'linux',
'sys_version': '3.6.7 | packaged by conda-forge | (default, Feb 28 2019, '
'09:07:38) \n'
'[GCC 7.3.0]',
'traits_version': '4.6.0'}
```
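A plausible root cause, given the helper shipped in `nipype/utils/filemanip.py` (shown in the source dump below): `write_rst_list` iterates its `items` argument directly, so a plain string such as captured stdout is consumed character by character. A standalone sketch of the symptom:

```python
def write_rst_list(items, prefix=""):
    # same logic as the filemanip.py helper: iterating a str
    # yields one character at a time
    out = []
    for item in items:
        out.append("{} {}".format(prefix, str(item)))
    return "\n".join(out) + "\n\n"

print(write_rst_list("This is my output"))    # one letter per line
print(write_rst_list(["This is my output"]))  # the expected single line
```

Coercing string inputs to a one-element list first (e.g. with `ensure_list`, also defined in `filemanip.py`) would avoid the character-wise iteration.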
| [
{
"content": "# -*- coding: utf-8 -*-\n# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-\n# vi: set ft=python sts=4 ts=4 sw=4 et:\n\"\"\"Miscellaneous file manipulation functions\n\"\"\"\nimport sys\nimport pickle\nimport errno\nimport subprocess as sp\nimport gzip\nimport hashlib\nimport locale\nfrom hashlib import md5\nimport os\nimport os.path as op\nimport re\nimport shutil\nimport contextlib\nimport posixpath\nfrom pathlib import Path\nimport simplejson as json\nfrom time import sleep, time\n\nfrom .. import logging, config, __version__ as version\nfrom .misc import is_container\n\nfmlogger = logging.getLogger(\"nipype.utils\")\n\nrelated_filetype_sets = [\n (\".hdr\", \".img\", \".mat\"),\n (\".nii\", \".mat\"),\n (\".BRIK\", \".HEAD\"),\n]\n\n\ndef _resolve_with_filenotfound(path, **kwargs):\n \"\"\" Raise FileNotFoundError instead of OSError \"\"\"\n try:\n return path.resolve(**kwargs)\n except OSError as e:\n if isinstance(e, FileNotFoundError):\n raise\n raise FileNotFoundError(str(path))\n\n\ndef path_resolve(path, strict=False):\n try:\n return _resolve_with_filenotfound(path, strict=strict)\n except TypeError: # PY35\n pass\n\n path = path.absolute()\n if strict or path.exists():\n return _resolve_with_filenotfound(path)\n\n # This is a hacky shortcut, using path.absolute() unmodified\n # In cases where the existing part of the path contains a\n # symlink, different results will be produced\n return path\n\n\ndef split_filename(fname):\n \"\"\"Split a filename into parts: path, base filename and extension.\n\n Parameters\n ----------\n fname : str\n file or path name\n\n Returns\n -------\n pth : str\n base path from fname\n fname : str\n filename from fname, without extension\n ext : str\n file extension from fname\n\n Examples\n --------\n >>> from nipype.utils.filemanip import split_filename\n >>> pth, fname, ext = split_filename('/home/data/subject.nii.gz')\n >>> pth\n '/home/data'\n\n >>> fname\n 'subject'\n\n >>> ext\n '.nii.gz'\n\n \"\"\"\n\n special_extensions = [\".nii.gz\", \".tar.gz\", \".niml.dset\"]\n\n pth = op.dirname(fname)\n fname = op.basename(fname)\n\n ext = None\n for special_ext in special_extensions:\n ext_len = len(special_ext)\n if (len(fname) > ext_len) and (fname[-ext_len:].lower() == special_ext.lower()):\n ext = fname[-ext_len:]\n fname = fname[:-ext_len]\n break\n if not ext:\n fname, ext = op.splitext(fname)\n\n return pth, fname, ext\n\n\ndef fname_presuffix(fname, prefix=\"\", suffix=\"\", newpath=None, use_ext=True):\n \"\"\"Manipulates path and name of input filename\n\n Parameters\n ----------\n fname : string\n A filename (may or may not include path)\n prefix : string\n Characters to prepend to the filename\n suffix : string\n Characters to append to the filename\n newpath : string\n Path to replace the path of the input fname\n use_ext : boolean\n If True (default), appends the extension of the original file\n to the output name.\n\n Returns\n -------\n Absolute path of the modified filename\n\n >>> from nipype.utils.filemanip import fname_presuffix\n >>> fname = 'foo.nii.gz'\n >>> fname_presuffix(fname,'pre','post','/tmp')\n '/tmp/prefoopost.nii.gz'\n\n >>> from nipype.interfaces.base import Undefined\n >>> fname_presuffix(fname, 'pre', 'post', Undefined) == \\\n fname_presuffix(fname, 'pre', 'post')\n True\n\n \"\"\"\n pth, fname, ext = split_filename(fname)\n if not use_ext:\n ext = \"\"\n\n # No need for isdefined: bool(Undefined) evaluates to False\n if newpath:\n pth = op.abspath(newpath)\n return op.join(pth, 
prefix + fname + suffix + ext)\n\n\ndef fnames_presuffix(fnames, prefix=\"\", suffix=\"\", newpath=None, use_ext=True):\n \"\"\"Calls fname_presuffix for a list of files.\n \"\"\"\n f2 = []\n for fname in fnames:\n f2.append(fname_presuffix(fname, prefix, suffix, newpath, use_ext))\n return f2\n\n\ndef hash_rename(filename, hashvalue):\n \"\"\"renames a file given original filename and hash\n and sets path to output_directory\n \"\"\"\n path, name, ext = split_filename(filename)\n newfilename = \"\".join((name, \"_0x\", hashvalue, ext))\n return op.join(path, newfilename)\n\n\ndef check_forhash(filename):\n \"\"\"checks if file has a hash in its filename\"\"\"\n if isinstance(filename, list):\n filename = filename[0]\n path, name = op.split(filename)\n if re.search(\"(_0x[a-z0-9]{32})\", name):\n hashvalue = re.findall(\"(_0x[a-z0-9]{32})\", name)\n return True, hashvalue\n else:\n return False, None\n\n\ndef hash_infile(afile, chunk_len=8192, crypto=hashlib.md5, raise_notfound=False):\n \"\"\"\n Computes hash of a file using 'crypto' module\n\n >>> hash_infile('smri_ants_registration_settings.json')\n 'f225785dfb0db9032aa5a0e4f2c730ad'\n\n >>> hash_infile('surf01.vtk')\n 'fdf1cf359b4e346034372cdeb58f9a88'\n\n >>> hash_infile('spminfo')\n '0dc55e3888c98a182dab179b976dfffc'\n\n >>> hash_infile('fsl_motion_outliers_fd.txt')\n 'defd1812c22405b1ee4431aac5bbdd73'\n\n\n \"\"\"\n if not op.isfile(afile):\n if raise_notfound:\n raise RuntimeError('File \"%s\" not found.' % afile)\n return None\n\n crypto_obj = crypto()\n with open(afile, \"rb\") as fp:\n while True:\n data = fp.read(chunk_len)\n if not data:\n break\n crypto_obj.update(data)\n return crypto_obj.hexdigest()\n\n\ndef hash_timestamp(afile):\n \"\"\" Computes md5 hash of the timestamp of a file \"\"\"\n md5hex = None\n if op.isfile(afile):\n md5obj = md5()\n stat = os.stat(afile)\n md5obj.update(str(stat.st_size).encode())\n md5obj.update(str(stat.st_mtime).encode())\n md5hex = md5obj.hexdigest()\n return md5hex\n\n\ndef _parse_mount_table(exit_code, output):\n \"\"\"Parses the output of ``mount`` to produce (path, fs_type) pairs\n\n Separated from _generate_cifs_table to enable testing logic with real\n outputs\n \"\"\"\n # Not POSIX\n if exit_code != 0:\n return []\n\n # Linux mount example: sysfs on /sys type sysfs (rw,nosuid,nodev,noexec)\n # <PATH>^^^^ ^^^^^<FSTYPE>\n # OSX mount example: /dev/disk2 on / (hfs, local, journaled)\n # <PATH>^ ^^^<FSTYPE>\n pattern = re.compile(r\".*? on (/.*?) 
(?:type |\\()([^\\s,\\)]+)\")\n\n # Keep line and match for error reporting (match == None on failure)\n # Ignore empty lines\n matches = [(l, pattern.match(l)) for l in output.strip().splitlines() if l]\n\n # (path, fstype) tuples, sorted by path length (longest first)\n mount_info = sorted(\n (match.groups() for _, match in matches if match is not None),\n key=lambda x: len(x[0]),\n reverse=True,\n )\n cifs_paths = [path for path, fstype in mount_info if fstype.lower() == \"cifs\"]\n\n # Report failures as warnings\n for line, match in matches:\n if match is None:\n fmlogger.debug(\"Cannot parse mount line: '%s'\", line)\n\n return [\n mount\n for mount in mount_info\n if any(mount[0].startswith(path) for path in cifs_paths)\n ]\n\n\ndef _generate_cifs_table():\n \"\"\"Construct a reverse-length-ordered list of mount points that\n fall under a CIFS mount.\n\n This precomputation allows efficient checking for whether a given path\n would be on a CIFS filesystem.\n\n On systems without a ``mount`` command, or with no CIFS mounts, returns an\n empty list.\n \"\"\"\n exit_code, output = sp.getstatusoutput(\"mount\")\n return _parse_mount_table(exit_code, output)\n\n\n_cifs_table = _generate_cifs_table()\n\n\ndef on_cifs(fname):\n \"\"\"\n Checks whether a file path is on a CIFS filesystem mounted in a POSIX\n host (i.e., has the ``mount`` command).\n\n On Windows, Docker mounts host directories into containers through CIFS\n shares, which has support for Minshall+French symlinks, or text files that\n the CIFS driver exposes to the OS as symlinks.\n We have found that under concurrent access to the filesystem, this feature\n can result in failures to create or read recently-created symlinks,\n leading to inconsistent behavior and ``FileNotFoundError``.\n\n This check is written to support disabling symlinks on CIFS shares.\n\n \"\"\"\n # Only the first match (most recent parent) counts\n for fspath, fstype in _cifs_table:\n if fname.startswith(fspath):\n return fstype == \"cifs\"\n return False\n\n\ndef copyfile(\n originalfile,\n newfile,\n copy=False,\n create_new=False,\n hashmethod=None,\n use_hardlink=False,\n copy_related_files=True,\n):\n \"\"\"Copy or link ``originalfile`` to ``newfile``.\n\n If ``use_hardlink`` is True, and the file can be hard-linked, then a\n link is created, instead of copying the file.\n\n If a hard link is not created and ``copy`` is False, then a symbolic\n link is created.\n\n Parameters\n ----------\n originalfile : str\n full path to original file\n newfile : str\n full path to new file\n copy : Bool\n specifies whether to copy or symlink files\n (default=False) but only for POSIX systems\n use_hardlink : Bool\n specifies whether to hard-link files, when able\n (Default=False), taking precedence over copy\n copy_related_files : Bool\n specifies whether to also operate on related files, as defined in\n ``related_filetype_sets``\n\n Returns\n -------\n None\n\n \"\"\"\n newhash = None\n orighash = None\n fmlogger.debug(newfile)\n\n if create_new:\n while op.exists(newfile):\n base, fname, ext = split_filename(newfile)\n s = re.search(\"_c[0-9]{4,4}$\", fname)\n i = 0\n if s:\n i = int(s.group()[2:]) + 1\n fname = fname[:-6] + \"_c%04d\" % i\n else:\n fname += \"_c%04d\" % i\n newfile = base + os.sep + fname + ext\n\n if hashmethod is None:\n hashmethod = config.get(\"execution\", \"hash_method\").lower()\n\n # Don't try creating symlinks on CIFS\n if copy is False and on_cifs(newfile):\n copy = True\n\n # Existing file\n # -------------\n # Options:\n # 
symlink\n # to regular file originalfile (keep if symlinking)\n # to same dest as symlink originalfile (keep if symlinking)\n # to other file (unlink)\n # regular file\n # hard link to originalfile (keep)\n # copy of file (same hash) (keep)\n # different file (diff hash) (unlink)\n keep = False\n if op.lexists(newfile):\n if op.islink(newfile):\n if all(\n (\n os.readlink(newfile) == op.realpath(originalfile),\n not use_hardlink,\n not copy,\n )\n ):\n keep = True\n elif posixpath.samefile(newfile, originalfile):\n keep = True\n else:\n if hashmethod == \"timestamp\":\n hashfn = hash_timestamp\n elif hashmethod == \"content\":\n hashfn = hash_infile\n else:\n raise AttributeError(\"Unknown hash method found:\", hashmethod)\n newhash = hashfn(newfile)\n fmlogger.debug(\n \"File: %s already exists,%s, copy:%d\", newfile, newhash, copy\n )\n orighash = hashfn(originalfile)\n keep = newhash == orighash\n if keep:\n fmlogger.debug(\n \"File: %s already exists, not overwriting, copy:%d\", newfile, copy\n )\n else:\n os.unlink(newfile)\n\n # New file\n # --------\n # use_hardlink & can_hardlink => hardlink\n # ~hardlink & ~copy & can_symlink => symlink\n # ~hardlink & ~symlink => copy\n if not keep and use_hardlink:\n try:\n fmlogger.debug(\"Linking File: %s->%s\", newfile, originalfile)\n # Use realpath to avoid hardlinking symlinks\n os.link(op.realpath(originalfile), newfile)\n except OSError:\n use_hardlink = False # Disable hardlink for associated files\n else:\n keep = True\n\n if not keep and not copy and os.name == \"posix\":\n try:\n fmlogger.debug(\"Symlinking File: %s->%s\", newfile, originalfile)\n os.symlink(originalfile, newfile)\n except OSError:\n copy = True # Disable symlink for associated files\n else:\n keep = True\n\n if not keep:\n try:\n fmlogger.debug(\"Copying File: %s->%s\", newfile, originalfile)\n shutil.copyfile(originalfile, newfile)\n except shutil.Error as e:\n fmlogger.warning(e.message)\n\n # Associated files\n if copy_related_files:\n related_file_pairs = (\n get_related_files(f, include_this_file=False)\n for f in (originalfile, newfile)\n )\n for alt_ofile, alt_nfile in zip(*related_file_pairs):\n if op.exists(alt_ofile):\n copyfile(\n alt_ofile,\n alt_nfile,\n copy,\n hashmethod=hashmethod,\n use_hardlink=use_hardlink,\n copy_related_files=False,\n )\n\n return newfile\n\n\ndef get_related_files(filename, include_this_file=True):\n \"\"\"Returns a list of related files, as defined in\n ``related_filetype_sets``, for a filename. (e.g., Nifti-Pair, Analyze (SPM)\n and AFNI files).\n\n Parameters\n ----------\n filename : str\n File name to find related filetypes of.\n include_this_file : bool\n If true, output includes the input filename.\n \"\"\"\n related_files = []\n path, name, this_type = split_filename(filename)\n for type_set in related_filetype_sets:\n if this_type in type_set:\n for related_type in type_set:\n if include_this_file or related_type != this_type:\n related_files.append(op.join(path, name + related_type))\n if not len(related_files):\n related_files = [filename]\n return related_files\n\n\ndef copyfiles(filelist, dest, copy=False, create_new=False):\n \"\"\"Copy or symlink files in ``filelist`` to ``dest`` directory.\n\n Parameters\n ----------\n filelist : list\n List of files to copy.\n dest : path/files\n full path to destination. 
If it is a list of length greater\n than 1, then it assumes that these are the names of the new\n files.\n copy : Bool\n specifies whether to copy or symlink files\n (default=False) but only for posix systems\n\n Returns\n -------\n None\n\n \"\"\"\n outfiles = ensure_list(dest)\n newfiles = []\n for i, f in enumerate(ensure_list(filelist)):\n if isinstance(f, list):\n newfiles.insert(i, copyfiles(f, dest, copy=copy, create_new=create_new))\n else:\n if len(outfiles) > 1:\n destfile = outfiles[i]\n else:\n destfile = fname_presuffix(f, newpath=outfiles[0])\n destfile = copyfile(f, destfile, copy, create_new=create_new)\n newfiles.insert(i, destfile)\n return newfiles\n\n\ndef ensure_list(filename):\n \"\"\"Returns a list given either a string or a list\n \"\"\"\n if isinstance(filename, (str, bytes)):\n return [filename]\n elif isinstance(filename, list):\n return filename\n elif is_container(filename):\n return [x for x in filename]\n else:\n return None\n\n\ndef simplify_list(filelist):\n \"\"\"Returns a list if filelist is a list of length greater than 1,\n otherwise returns the first element\n \"\"\"\n if len(filelist) > 1:\n return filelist\n else:\n return filelist[0]\n\n\nfilename_to_list = ensure_list\nlist_to_filename = simplify_list\n\n\ndef check_depends(targets, dependencies):\n \"\"\"Return true if all targets exist and are newer than all dependencies.\n\n An OSError will be raised if there are missing dependencies.\n \"\"\"\n tgts = ensure_list(targets)\n deps = ensure_list(dependencies)\n return all(map(op.exists, tgts)) and min(map(op.getmtime, tgts)) > max(\n list(map(op.getmtime, deps)) + [0]\n )\n\n\ndef save_json(filename, data):\n \"\"\"Save data to a json file\n\n Parameters\n ----------\n filename : str\n Filename to save data in.\n data : dict\n Dictionary to save in json file.\n\n \"\"\"\n mode = \"w\"\n with open(filename, mode) as fp:\n json.dump(data, fp, sort_keys=True, indent=4)\n\n\ndef load_json(filename):\n \"\"\"Load data from a json file\n\n Parameters\n ----------\n filename : str\n Filename to load data from.\n\n Returns\n -------\n data : dict\n\n \"\"\"\n\n with open(filename, \"r\") as fp:\n data = json.load(fp)\n return data\n\n\ndef loadcrash(infile, *args):\n if infile.endswith(\"pkl\") or infile.endswith(\"pklz\"):\n return loadpkl(infile)\n else:\n raise ValueError(\"Only pickled crashfiles are supported\")\n\n\ndef loadpkl(infile):\n \"\"\"Load a zipped or plain cPickled file.\"\"\"\n infile = Path(infile)\n fmlogger.debug(\"Loading pkl: %s\", infile)\n pklopen = gzip.open if infile.suffix == \".pklz\" else open\n\n t = time()\n timeout = float(config.get(\"execution\", \"job_finished_timeout\"))\n timed_out = True\n while (time() - t) < timeout:\n if infile.exists():\n timed_out = False\n break\n fmlogger.debug(\"'{}' missing; waiting 2s\".format(infile))\n sleep(2)\n if timed_out:\n error_message = (\n \"Result file {0} expected, but \"\n \"does not exist after ({1}) \"\n \"seconds.\".format(infile, timeout)\n )\n raise IOError(error_message)\n\n with pklopen(str(infile), \"rb\") as pkl_file:\n pkl_contents = pkl_file.read()\n\n pkl_metadata = None\n\n # Look if pkl file contains version metadata\n idx = pkl_contents.find(b\"\\n\")\n if idx >= 0:\n try:\n pkl_metadata = json.loads(pkl_contents[:idx])\n except (UnicodeDecodeError, json.JSONDecodeError):\n # Could not get version info\n pass\n else:\n # On success, skip JSON metadata\n pkl_contents = pkl_contents[idx + 1 :]\n\n # Pickle files may contain relative paths that must be resolved 
relative\n # to the working directory, so use indirectory while attempting to load\n unpkl = None\n try:\n with indirectory(infile.parent):\n unpkl = pickle.loads(pkl_contents)\n except UnicodeDecodeError:\n # Was this pickle created with Python 2.x?\n with indirectory(infile.parent):\n unpkl = pickle.loads(pkl_contents, fix_imports=True, encoding=\"utf-8\")\n fmlogger.info(\"Successfully loaded pkl in compatibility mode.\")\n # Unpickling problems\n except Exception as e:\n if pkl_metadata and \"version\" in pkl_metadata:\n if pkl_metadata[\"version\"] != version:\n fmlogger.error(\n \"\"\"\\\nAttempted to open a results file generated by Nipype version %s, \\\nwith an incompatible Nipype version (%s)\"\"\",\n pkl_metadata[\"version\"],\n version,\n )\n raise e\n fmlogger.warning(\n \"\"\"\\\nNo metadata was found in the pkl file. Make sure you are currently using \\\nthe same Nipype version from the generated pkl.\"\"\"\n )\n raise e\n\n if unpkl is None:\n raise ValueError(\"Loading %s resulted in None.\" % infile)\n\n return unpkl\n\n\ndef crash2txt(filename, record):\n \"\"\" Write out plain text crash file \"\"\"\n with open(filename, \"w\") as fp:\n if \"node\" in record:\n node = record[\"node\"]\n fp.write(\"Node: {}\\n\".format(node.fullname))\n fp.write(\"Working directory: {}\\n\".format(node.output_dir()))\n fp.write(\"\\n\")\n fp.write(\"Node inputs:\\n{}\\n\".format(node.inputs))\n fp.write(\"\".join(record[\"traceback\"]))\n\n\ndef read_stream(stream, logger=None, encoding=None):\n \"\"\"\n Robustly reads a stream, sending a warning to a logger\n if some decoding error was raised.\n\n >>> read_stream(bytearray([65, 0xc7, 65, 10, 66])) # doctest: +ELLIPSIS\n ['A...A', 'B']\n\n\n \"\"\"\n default_encoding = encoding or locale.getdefaultlocale()[1] or \"UTF-8\"\n logger = logger or fmlogger\n try:\n out = stream.decode(default_encoding)\n except UnicodeDecodeError as err:\n out = stream.decode(default_encoding, errors=\"replace\")\n logger.warning(\"Error decoding string: %s\", err)\n return out.splitlines()\n\n\ndef savepkl(filename, record, versioning=False):\n from io import BytesIO\n\n with BytesIO() as f:\n if versioning:\n metadata = json.dumps({\"version\": version})\n f.write(metadata.encode(\"utf-8\"))\n f.write(\"\\n\".encode(\"utf-8\"))\n pickle.dump(record, f)\n content = f.getvalue()\n\n pkl_open = gzip.open if filename.endswith(\".pklz\") else open\n tmpfile = filename + \".tmp\"\n with pkl_open(tmpfile, \"wb\") as pkl_file:\n pkl_file.write(content)\n os.rename(tmpfile, filename)\n\n\nrst_levels = [\"=\", \"-\", \"~\", \"+\"]\n\n\ndef write_rst_header(header, level=0):\n return \"\\n\".join((header, \"\".join([rst_levels[level] for _ in header]))) + \"\\n\\n\"\n\n\ndef write_rst_list(items, prefix=\"\"):\n out = []\n for item in items:\n out.append(\"{} {}\".format(prefix, str(item)))\n return \"\\n\".join(out) + \"\\n\\n\"\n\n\ndef write_rst_dict(info, prefix=\"\"):\n out = []\n for key, value in sorted(info.items()):\n out.append(\"{}* {} : {}\".format(prefix, key, str(value)))\n return \"\\n\".join(out) + \"\\n\\n\"\n\n\ndef dist_is_editable(dist):\n \"\"\"Is distribution an editable install?\n\n Parameters\n ----------\n dist : string\n Package name\n\n # Borrowed from `pip`'s' API\n \"\"\"\n for path_item in sys.path:\n egg_link = op.join(path_item, dist + \".egg-link\")\n if op.isfile(egg_link):\n return True\n return False\n\n\ndef emptydirs(path, noexist_ok=False):\n \"\"\"\n Empty an existing directory, without deleting it. 
Do not\n raise error if the path does not exist and noexist_ok is True.\n\n Parameters\n ----------\n path : directory that should be empty\n\n \"\"\"\n fmlogger.debug(\"Removing contents of %s\", path)\n\n if noexist_ok and not op.exists(path):\n return True\n\n if op.isfile(path):\n raise OSError('path \"%s\" should be a directory' % path)\n\n try:\n shutil.rmtree(path)\n except OSError as ex:\n elcont = os.listdir(path)\n if ex.errno == errno.ENOTEMPTY and not elcont:\n fmlogger.warning(\n \"An exception was raised trying to remove old %s, but the path\"\n \" seems empty. Is it an NFS mount?. Passing the exception.\",\n path,\n )\n elif ex.errno == errno.ENOTEMPTY and elcont:\n fmlogger.debug(\"Folder %s contents (%d items).\", path, len(elcont))\n raise ex\n else:\n raise ex\n\n os.makedirs(path)\n\n\ndef silentrm(filename):\n \"\"\"\n Equivalent to ``rm -f``, returns ``False`` if the file did not\n exist.\n\n Parameters\n ----------\n\n filename : str\n file to be deleted\n\n \"\"\"\n try:\n os.remove(filename)\n except OSError as e:\n if e.errno != errno.ENOENT:\n raise\n return False\n return True\n\n\ndef which(cmd, env=None, pathext=None):\n \"\"\"\n Return the path to an executable which would be run if the given\n cmd was called. If no cmd would be called, return ``None``.\n\n Code for Python < 3.3 is based on a code snippet from\n http://orip.org/2009/08/python-checking-if-executable-exists-in.html\n\n \"\"\"\n\n if pathext is None:\n pathext = os.getenv(\"PATHEXT\", \"\").split(os.pathsep)\n pathext.insert(0, \"\")\n\n path = os.getenv(\"PATH\", os.defpath)\n if env and \"PATH\" in env:\n path = env.get(\"PATH\")\n\n for ext in pathext:\n filename = shutil.which(cmd + ext, path=path)\n if filename:\n return filename\n return None\n\n\ndef get_dependencies(name, environ):\n \"\"\"Return library dependencies of a dynamically linked executable\n\n Uses otool on darwin, ldd on linux. Currently doesn't support windows.\n\n \"\"\"\n command = None\n if sys.platform == \"darwin\":\n command = \"otool -L `which %s`\" % name\n elif \"linux\" in sys.platform:\n command = \"ldd `which %s`\" % name\n else:\n return \"Platform %s not supported\" % sys.platform\n\n deps = None\n try:\n proc = sp.Popen(\n command, stdout=sp.PIPE, stderr=sp.PIPE, shell=True, env=environ\n )\n o, e = proc.communicate()\n deps = o.rstrip()\n except Exception as ex:\n deps = '\"%s\" failed' % command\n fmlogger.warning(\n \"Could not get dependencies of %s. 
Error:\\n%s\", name, ex.message\n )\n return deps\n\n\ndef canonicalize_env(env):\n \"\"\"Windows requires that environment be dicts with bytes as keys and values\n This function converts any unicode entries for Windows only, returning the\n dictionary untouched in other environments.\n\n Parameters\n ----------\n env : dict\n environment dictionary with unicode or bytes keys and values\n\n Returns\n -------\n env : dict\n Windows: environment dictionary with bytes keys and values\n Other: untouched input ``env``\n \"\"\"\n if os.name != \"nt\":\n return env\n\n out_env = {}\n for key, val in env.items():\n if not isinstance(key, bytes):\n key = key.encode(\"utf-8\")\n if not isinstance(val, bytes):\n val = val.encode(\"utf-8\")\n out_env[key] = val\n return out_env\n\n\ndef relpath(path, start=None):\n \"\"\"Return a relative version of a path\"\"\"\n\n try:\n return op.relpath(path, start)\n except AttributeError:\n pass\n\n if start is None:\n start = os.curdir\n if not path:\n raise ValueError(\"no path specified\")\n start_list = op.abspath(start).split(op.sep)\n path_list = op.abspath(path).split(op.sep)\n if start_list[0].lower() != path_list[0].lower():\n unc_path, rest = op.splitunc(path)\n unc_start, rest = op.splitunc(start)\n if bool(unc_path) ^ bool(unc_start):\n raise ValueError(\n (\"Cannot mix UNC and non-UNC paths \" \"(%s and %s)\") % (path, start)\n )\n else:\n raise ValueError(\n \"path is on drive %s, start on drive %s\" % (path_list[0], start_list[0])\n )\n # Work out how much of the filepath is shared by start and path.\n for i in range(min(len(start_list), len(path_list))):\n if start_list[i].lower() != path_list[i].lower():\n break\n else:\n i += 1\n\n rel_list = [op.pardir] * (len(start_list) - i) + path_list[i:]\n if not rel_list:\n return os.curdir\n return op.join(*rel_list)\n\n\[email protected]\ndef indirectory(path):\n cwd = os.getcwd()\n os.chdir(str(path))\n try:\n yield\n finally:\n os.chdir(cwd)\n",
"path": "nipype/utils/filemanip.py"
}
] | [
{
"content": "# -*- coding: utf-8 -*-\n# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-\n# vi: set ft=python sts=4 ts=4 sw=4 et:\n\"\"\"Miscellaneous file manipulation functions\n\"\"\"\nimport sys\nimport pickle\nimport errno\nimport subprocess as sp\nimport gzip\nimport hashlib\nimport locale\nfrom hashlib import md5\nimport os\nimport os.path as op\nimport re\nimport shutil\nimport contextlib\nimport posixpath\nfrom pathlib import Path\nimport simplejson as json\nfrom time import sleep, time\n\nfrom .. import logging, config, __version__ as version\nfrom .misc import is_container\n\nfmlogger = logging.getLogger(\"nipype.utils\")\n\nrelated_filetype_sets = [\n (\".hdr\", \".img\", \".mat\"),\n (\".nii\", \".mat\"),\n (\".BRIK\", \".HEAD\"),\n]\n\n\ndef _resolve_with_filenotfound(path, **kwargs):\n \"\"\" Raise FileNotFoundError instead of OSError \"\"\"\n try:\n return path.resolve(**kwargs)\n except OSError as e:\n if isinstance(e, FileNotFoundError):\n raise\n raise FileNotFoundError(str(path))\n\n\ndef path_resolve(path, strict=False):\n try:\n return _resolve_with_filenotfound(path, strict=strict)\n except TypeError: # PY35\n pass\n\n path = path.absolute()\n if strict or path.exists():\n return _resolve_with_filenotfound(path)\n\n # This is a hacky shortcut, using path.absolute() unmodified\n # In cases where the existing part of the path contains a\n # symlink, different results will be produced\n return path\n\n\ndef split_filename(fname):\n \"\"\"Split a filename into parts: path, base filename and extension.\n\n Parameters\n ----------\n fname : str\n file or path name\n\n Returns\n -------\n pth : str\n base path from fname\n fname : str\n filename from fname, without extension\n ext : str\n file extension from fname\n\n Examples\n --------\n >>> from nipype.utils.filemanip import split_filename\n >>> pth, fname, ext = split_filename('/home/data/subject.nii.gz')\n >>> pth\n '/home/data'\n\n >>> fname\n 'subject'\n\n >>> ext\n '.nii.gz'\n\n \"\"\"\n\n special_extensions = [\".nii.gz\", \".tar.gz\", \".niml.dset\"]\n\n pth = op.dirname(fname)\n fname = op.basename(fname)\n\n ext = None\n for special_ext in special_extensions:\n ext_len = len(special_ext)\n if (len(fname) > ext_len) and (fname[-ext_len:].lower() == special_ext.lower()):\n ext = fname[-ext_len:]\n fname = fname[:-ext_len]\n break\n if not ext:\n fname, ext = op.splitext(fname)\n\n return pth, fname, ext\n\n\ndef fname_presuffix(fname, prefix=\"\", suffix=\"\", newpath=None, use_ext=True):\n \"\"\"Manipulates path and name of input filename\n\n Parameters\n ----------\n fname : string\n A filename (may or may not include path)\n prefix : string\n Characters to prepend to the filename\n suffix : string\n Characters to append to the filename\n newpath : string\n Path to replace the path of the input fname\n use_ext : boolean\n If True (default), appends the extension of the original file\n to the output name.\n\n Returns\n -------\n Absolute path of the modified filename\n\n >>> from nipype.utils.filemanip import fname_presuffix\n >>> fname = 'foo.nii.gz'\n >>> fname_presuffix(fname,'pre','post','/tmp')\n '/tmp/prefoopost.nii.gz'\n\n >>> from nipype.interfaces.base import Undefined\n >>> fname_presuffix(fname, 'pre', 'post', Undefined) == \\\n fname_presuffix(fname, 'pre', 'post')\n True\n\n \"\"\"\n pth, fname, ext = split_filename(fname)\n if not use_ext:\n ext = \"\"\n\n # No need for isdefined: bool(Undefined) evaluates to False\n if newpath:\n pth = op.abspath(newpath)\n return op.join(pth, 
prefix + fname + suffix + ext)\n\n\ndef fnames_presuffix(fnames, prefix=\"\", suffix=\"\", newpath=None, use_ext=True):\n \"\"\"Calls fname_presuffix for a list of files.\n \"\"\"\n f2 = []\n for fname in fnames:\n f2.append(fname_presuffix(fname, prefix, suffix, newpath, use_ext))\n return f2\n\n\ndef hash_rename(filename, hashvalue):\n \"\"\"renames a file given original filename and hash\n and sets path to output_directory\n \"\"\"\n path, name, ext = split_filename(filename)\n newfilename = \"\".join((name, \"_0x\", hashvalue, ext))\n return op.join(path, newfilename)\n\n\ndef check_forhash(filename):\n \"\"\"checks if file has a hash in its filename\"\"\"\n if isinstance(filename, list):\n filename = filename[0]\n path, name = op.split(filename)\n if re.search(\"(_0x[a-z0-9]{32})\", name):\n hashvalue = re.findall(\"(_0x[a-z0-9]{32})\", name)\n return True, hashvalue\n else:\n return False, None\n\n\ndef hash_infile(afile, chunk_len=8192, crypto=hashlib.md5, raise_notfound=False):\n \"\"\"\n Computes hash of a file using 'crypto' module\n\n >>> hash_infile('smri_ants_registration_settings.json')\n 'f225785dfb0db9032aa5a0e4f2c730ad'\n\n >>> hash_infile('surf01.vtk')\n 'fdf1cf359b4e346034372cdeb58f9a88'\n\n >>> hash_infile('spminfo')\n '0dc55e3888c98a182dab179b976dfffc'\n\n >>> hash_infile('fsl_motion_outliers_fd.txt')\n 'defd1812c22405b1ee4431aac5bbdd73'\n\n\n \"\"\"\n if not op.isfile(afile):\n if raise_notfound:\n raise RuntimeError('File \"%s\" not found.' % afile)\n return None\n\n crypto_obj = crypto()\n with open(afile, \"rb\") as fp:\n while True:\n data = fp.read(chunk_len)\n if not data:\n break\n crypto_obj.update(data)\n return crypto_obj.hexdigest()\n\n\ndef hash_timestamp(afile):\n \"\"\" Computes md5 hash of the timestamp of a file \"\"\"\n md5hex = None\n if op.isfile(afile):\n md5obj = md5()\n stat = os.stat(afile)\n md5obj.update(str(stat.st_size).encode())\n md5obj.update(str(stat.st_mtime).encode())\n md5hex = md5obj.hexdigest()\n return md5hex\n\n\ndef _parse_mount_table(exit_code, output):\n \"\"\"Parses the output of ``mount`` to produce (path, fs_type) pairs\n\n Separated from _generate_cifs_table to enable testing logic with real\n outputs\n \"\"\"\n # Not POSIX\n if exit_code != 0:\n return []\n\n # Linux mount example: sysfs on /sys type sysfs (rw,nosuid,nodev,noexec)\n # <PATH>^^^^ ^^^^^<FSTYPE>\n # OSX mount example: /dev/disk2 on / (hfs, local, journaled)\n # <PATH>^ ^^^<FSTYPE>\n pattern = re.compile(r\".*? on (/.*?) 
(?:type |\\()([^\\s,\\)]+)\")\n\n # Keep line and match for error reporting (match == None on failure)\n # Ignore empty lines\n matches = [(l, pattern.match(l)) for l in output.strip().splitlines() if l]\n\n # (path, fstype) tuples, sorted by path length (longest first)\n mount_info = sorted(\n (match.groups() for _, match in matches if match is not None),\n key=lambda x: len(x[0]),\n reverse=True,\n )\n cifs_paths = [path for path, fstype in mount_info if fstype.lower() == \"cifs\"]\n\n # Report failures as warnings\n for line, match in matches:\n if match is None:\n fmlogger.debug(\"Cannot parse mount line: '%s'\", line)\n\n return [\n mount\n for mount in mount_info\n if any(mount[0].startswith(path) for path in cifs_paths)\n ]\n\n\ndef _generate_cifs_table():\n \"\"\"Construct a reverse-length-ordered list of mount points that\n fall under a CIFS mount.\n\n This precomputation allows efficient checking for whether a given path\n would be on a CIFS filesystem.\n\n On systems without a ``mount`` command, or with no CIFS mounts, returns an\n empty list.\n \"\"\"\n exit_code, output = sp.getstatusoutput(\"mount\")\n return _parse_mount_table(exit_code, output)\n\n\n_cifs_table = _generate_cifs_table()\n\n\ndef on_cifs(fname):\n \"\"\"\n Checks whether a file path is on a CIFS filesystem mounted in a POSIX\n host (i.e., has the ``mount`` command).\n\n On Windows, Docker mounts host directories into containers through CIFS\n shares, which has support for Minshall+French symlinks, or text files that\n the CIFS driver exposes to the OS as symlinks.\n We have found that under concurrent access to the filesystem, this feature\n can result in failures to create or read recently-created symlinks,\n leading to inconsistent behavior and ``FileNotFoundError``.\n\n This check is written to support disabling symlinks on CIFS shares.\n\n \"\"\"\n # Only the first match (most recent parent) counts\n for fspath, fstype in _cifs_table:\n if fname.startswith(fspath):\n return fstype == \"cifs\"\n return False\n\n\ndef copyfile(\n originalfile,\n newfile,\n copy=False,\n create_new=False,\n hashmethod=None,\n use_hardlink=False,\n copy_related_files=True,\n):\n \"\"\"Copy or link ``originalfile`` to ``newfile``.\n\n If ``use_hardlink`` is True, and the file can be hard-linked, then a\n link is created, instead of copying the file.\n\n If a hard link is not created and ``copy`` is False, then a symbolic\n link is created.\n\n Parameters\n ----------\n originalfile : str\n full path to original file\n newfile : str\n full path to new file\n copy : Bool\n specifies whether to copy or symlink files\n (default=False) but only for POSIX systems\n use_hardlink : Bool\n specifies whether to hard-link files, when able\n (Default=False), taking precedence over copy\n copy_related_files : Bool\n specifies whether to also operate on related files, as defined in\n ``related_filetype_sets``\n\n Returns\n -------\n None\n\n \"\"\"\n newhash = None\n orighash = None\n fmlogger.debug(newfile)\n\n if create_new:\n while op.exists(newfile):\n base, fname, ext = split_filename(newfile)\n s = re.search(\"_c[0-9]{4,4}$\", fname)\n i = 0\n if s:\n i = int(s.group()[2:]) + 1\n fname = fname[:-6] + \"_c%04d\" % i\n else:\n fname += \"_c%04d\" % i\n newfile = base + os.sep + fname + ext\n\n if hashmethod is None:\n hashmethod = config.get(\"execution\", \"hash_method\").lower()\n\n # Don't try creating symlinks on CIFS\n if copy is False and on_cifs(newfile):\n copy = True\n\n # Existing file\n # -------------\n # Options:\n # 
symlink\n # to regular file originalfile (keep if symlinking)\n # to same dest as symlink originalfile (keep if symlinking)\n # to other file (unlink)\n # regular file\n # hard link to originalfile (keep)\n # copy of file (same hash) (keep)\n # different file (diff hash) (unlink)\n keep = False\n if op.lexists(newfile):\n if op.islink(newfile):\n if all(\n (\n os.readlink(newfile) == op.realpath(originalfile),\n not use_hardlink,\n not copy,\n )\n ):\n keep = True\n elif posixpath.samefile(newfile, originalfile):\n keep = True\n else:\n if hashmethod == \"timestamp\":\n hashfn = hash_timestamp\n elif hashmethod == \"content\":\n hashfn = hash_infile\n else:\n raise AttributeError(\"Unknown hash method found:\", hashmethod)\n newhash = hashfn(newfile)\n fmlogger.debug(\n \"File: %s already exists,%s, copy:%d\", newfile, newhash, copy\n )\n orighash = hashfn(originalfile)\n keep = newhash == orighash\n if keep:\n fmlogger.debug(\n \"File: %s already exists, not overwriting, copy:%d\", newfile, copy\n )\n else:\n os.unlink(newfile)\n\n # New file\n # --------\n # use_hardlink & can_hardlink => hardlink\n # ~hardlink & ~copy & can_symlink => symlink\n # ~hardlink & ~symlink => copy\n if not keep and use_hardlink:\n try:\n fmlogger.debug(\"Linking File: %s->%s\", newfile, originalfile)\n # Use realpath to avoid hardlinking symlinks\n os.link(op.realpath(originalfile), newfile)\n except OSError:\n use_hardlink = False # Disable hardlink for associated files\n else:\n keep = True\n\n if not keep and not copy and os.name == \"posix\":\n try:\n fmlogger.debug(\"Symlinking File: %s->%s\", newfile, originalfile)\n os.symlink(originalfile, newfile)\n except OSError:\n copy = True # Disable symlink for associated files\n else:\n keep = True\n\n if not keep:\n try:\n fmlogger.debug(\"Copying File: %s->%s\", newfile, originalfile)\n shutil.copyfile(originalfile, newfile)\n except shutil.Error as e:\n fmlogger.warning(e.message)\n\n # Associated files\n if copy_related_files:\n related_file_pairs = (\n get_related_files(f, include_this_file=False)\n for f in (originalfile, newfile)\n )\n for alt_ofile, alt_nfile in zip(*related_file_pairs):\n if op.exists(alt_ofile):\n copyfile(\n alt_ofile,\n alt_nfile,\n copy,\n hashmethod=hashmethod,\n use_hardlink=use_hardlink,\n copy_related_files=False,\n )\n\n return newfile\n\n\ndef get_related_files(filename, include_this_file=True):\n \"\"\"Returns a list of related files, as defined in\n ``related_filetype_sets``, for a filename. (e.g., Nifti-Pair, Analyze (SPM)\n and AFNI files).\n\n Parameters\n ----------\n filename : str\n File name to find related filetypes of.\n include_this_file : bool\n If true, output includes the input filename.\n \"\"\"\n related_files = []\n path, name, this_type = split_filename(filename)\n for type_set in related_filetype_sets:\n if this_type in type_set:\n for related_type in type_set:\n if include_this_file or related_type != this_type:\n related_files.append(op.join(path, name + related_type))\n if not len(related_files):\n related_files = [filename]\n return related_files\n\n\ndef copyfiles(filelist, dest, copy=False, create_new=False):\n \"\"\"Copy or symlink files in ``filelist`` to ``dest`` directory.\n\n Parameters\n ----------\n filelist : list\n List of files to copy.\n dest : path/files\n full path to destination. 
If it is a list of length greater\n than 1, then it assumes that these are the names of the new\n files.\n copy : Bool\n specifies whether to copy or symlink files\n (default=False) but only for posix systems\n\n Returns\n -------\n None\n\n \"\"\"\n outfiles = ensure_list(dest)\n newfiles = []\n for i, f in enumerate(ensure_list(filelist)):\n if isinstance(f, list):\n newfiles.insert(i, copyfiles(f, dest, copy=copy, create_new=create_new))\n else:\n if len(outfiles) > 1:\n destfile = outfiles[i]\n else:\n destfile = fname_presuffix(f, newpath=outfiles[0])\n destfile = copyfile(f, destfile, copy, create_new=create_new)\n newfiles.insert(i, destfile)\n return newfiles\n\n\ndef ensure_list(filename):\n \"\"\"Returns a list given either a string or a list\n \"\"\"\n if isinstance(filename, (str, bytes)):\n return [filename]\n elif isinstance(filename, list):\n return filename\n elif is_container(filename):\n return [x for x in filename]\n else:\n return None\n\n\ndef simplify_list(filelist):\n \"\"\"Returns a list if filelist is a list of length greater than 1,\n otherwise returns the first element\n \"\"\"\n if len(filelist) > 1:\n return filelist\n else:\n return filelist[0]\n\n\nfilename_to_list = ensure_list\nlist_to_filename = simplify_list\n\n\ndef check_depends(targets, dependencies):\n \"\"\"Return true if all targets exist and are newer than all dependencies.\n\n An OSError will be raised if there are missing dependencies.\n \"\"\"\n tgts = ensure_list(targets)\n deps = ensure_list(dependencies)\n return all(map(op.exists, tgts)) and min(map(op.getmtime, tgts)) > max(\n list(map(op.getmtime, deps)) + [0]\n )\n\n\ndef save_json(filename, data):\n \"\"\"Save data to a json file\n\n Parameters\n ----------\n filename : str\n Filename to save data in.\n data : dict\n Dictionary to save in json file.\n\n \"\"\"\n mode = \"w\"\n with open(filename, mode) as fp:\n json.dump(data, fp, sort_keys=True, indent=4)\n\n\ndef load_json(filename):\n \"\"\"Load data from a json file\n\n Parameters\n ----------\n filename : str\n Filename to load data from.\n\n Returns\n -------\n data : dict\n\n \"\"\"\n\n with open(filename, \"r\") as fp:\n data = json.load(fp)\n return data\n\n\ndef loadcrash(infile, *args):\n if infile.endswith(\"pkl\") or infile.endswith(\"pklz\"):\n return loadpkl(infile)\n else:\n raise ValueError(\"Only pickled crashfiles are supported\")\n\n\ndef loadpkl(infile):\n \"\"\"Load a zipped or plain cPickled file.\"\"\"\n infile = Path(infile)\n fmlogger.debug(\"Loading pkl: %s\", infile)\n pklopen = gzip.open if infile.suffix == \".pklz\" else open\n\n t = time()\n timeout = float(config.get(\"execution\", \"job_finished_timeout\"))\n timed_out = True\n while (time() - t) < timeout:\n if infile.exists():\n timed_out = False\n break\n fmlogger.debug(\"'{}' missing; waiting 2s\".format(infile))\n sleep(2)\n if timed_out:\n error_message = (\n \"Result file {0} expected, but \"\n \"does not exist after ({1}) \"\n \"seconds.\".format(infile, timeout)\n )\n raise IOError(error_message)\n\n with pklopen(str(infile), \"rb\") as pkl_file:\n pkl_contents = pkl_file.read()\n\n pkl_metadata = None\n\n # Look if pkl file contains version metadata\n idx = pkl_contents.find(b\"\\n\")\n if idx >= 0:\n try:\n pkl_metadata = json.loads(pkl_contents[:idx])\n except (UnicodeDecodeError, json.JSONDecodeError):\n # Could not get version info\n pass\n else:\n # On success, skip JSON metadata\n pkl_contents = pkl_contents[idx + 1 :]\n\n # Pickle files may contain relative paths that must be resolved 
relative\n # to the working directory, so use indirectory while attempting to load\n unpkl = None\n try:\n with indirectory(infile.parent):\n unpkl = pickle.loads(pkl_contents)\n except UnicodeDecodeError:\n # Was this pickle created with Python 2.x?\n with indirectory(infile.parent):\n unpkl = pickle.loads(pkl_contents, fix_imports=True, encoding=\"utf-8\")\n fmlogger.info(\"Successfully loaded pkl in compatibility mode.\")\n # Unpickling problems\n except Exception as e:\n if pkl_metadata and \"version\" in pkl_metadata:\n if pkl_metadata[\"version\"] != version:\n fmlogger.error(\n \"\"\"\\\nAttempted to open a results file generated by Nipype version %s, \\\nwith an incompatible Nipype version (%s)\"\"\",\n pkl_metadata[\"version\"],\n version,\n )\n raise e\n fmlogger.warning(\n \"\"\"\\\nNo metadata was found in the pkl file. Make sure you are currently using \\\nthe same Nipype version from the generated pkl.\"\"\"\n )\n raise e\n\n if unpkl is None:\n raise ValueError(\"Loading %s resulted in None.\" % infile)\n\n return unpkl\n\n\ndef crash2txt(filename, record):\n \"\"\" Write out plain text crash file \"\"\"\n with open(filename, \"w\") as fp:\n if \"node\" in record:\n node = record[\"node\"]\n fp.write(\"Node: {}\\n\".format(node.fullname))\n fp.write(\"Working directory: {}\\n\".format(node.output_dir()))\n fp.write(\"\\n\")\n fp.write(\"Node inputs:\\n{}\\n\".format(node.inputs))\n fp.write(\"\".join(record[\"traceback\"]))\n\n\ndef read_stream(stream, logger=None, encoding=None):\n \"\"\"\n Robustly reads a stream, sending a warning to a logger\n if some decoding error was raised.\n\n >>> read_stream(bytearray([65, 0xc7, 65, 10, 66])) # doctest: +ELLIPSIS\n ['A...A', 'B']\n\n\n \"\"\"\n default_encoding = encoding or locale.getdefaultlocale()[1] or \"UTF-8\"\n logger = logger or fmlogger\n try:\n out = stream.decode(default_encoding)\n except UnicodeDecodeError as err:\n out = stream.decode(default_encoding, errors=\"replace\")\n logger.warning(\"Error decoding string: %s\", err)\n return out.splitlines()\n\n\ndef savepkl(filename, record, versioning=False):\n from io import BytesIO\n\n with BytesIO() as f:\n if versioning:\n metadata = json.dumps({\"version\": version})\n f.write(metadata.encode(\"utf-8\"))\n f.write(\"\\n\".encode(\"utf-8\"))\n pickle.dump(record, f)\n content = f.getvalue()\n\n pkl_open = gzip.open if filename.endswith(\".pklz\") else open\n tmpfile = filename + \".tmp\"\n with pkl_open(tmpfile, \"wb\") as pkl_file:\n pkl_file.write(content)\n os.rename(tmpfile, filename)\n\n\nrst_levels = [\"=\", \"-\", \"~\", \"+\"]\n\n\ndef write_rst_header(header, level=0):\n return \"\\n\".join((header, \"\".join([rst_levels[level] for _ in header]))) + \"\\n\\n\"\n\n\ndef write_rst_list(items, prefix=\"\"):\n out = []\n for item in ensure_list(items):\n out.append(\"{} {}\".format(prefix, str(item)))\n return \"\\n\".join(out) + \"\\n\\n\"\n\n\ndef write_rst_dict(info, prefix=\"\"):\n out = []\n for key, value in sorted(info.items()):\n out.append(\"{}* {} : {}\".format(prefix, key, str(value)))\n return \"\\n\".join(out) + \"\\n\\n\"\n\n\ndef dist_is_editable(dist):\n \"\"\"Is distribution an editable install?\n\n Parameters\n ----------\n dist : string\n Package name\n\n # Borrowed from `pip`'s' API\n \"\"\"\n for path_item in sys.path:\n egg_link = op.join(path_item, dist + \".egg-link\")\n if op.isfile(egg_link):\n return True\n return False\n\n\ndef emptydirs(path, noexist_ok=False):\n \"\"\"\n Empty an existing directory, without deleting it. 
Do not\n raise error if the path does not exist and noexist_ok is True.\n\n Parameters\n ----------\n path : directory that should be empty\n\n \"\"\"\n fmlogger.debug(\"Removing contents of %s\", path)\n\n if noexist_ok and not op.exists(path):\n return True\n\n if op.isfile(path):\n raise OSError('path \"%s\" should be a directory' % path)\n\n try:\n shutil.rmtree(path)\n except OSError as ex:\n elcont = os.listdir(path)\n if ex.errno == errno.ENOTEMPTY and not elcont:\n fmlogger.warning(\n \"An exception was raised trying to remove old %s, but the path\"\n \" seems empty. Is it an NFS mount?. Passing the exception.\",\n path,\n )\n elif ex.errno == errno.ENOTEMPTY and elcont:\n fmlogger.debug(\"Folder %s contents (%d items).\", path, len(elcont))\n raise ex\n else:\n raise ex\n\n os.makedirs(path)\n\n\ndef silentrm(filename):\n \"\"\"\n Equivalent to ``rm -f``, returns ``False`` if the file did not\n exist.\n\n Parameters\n ----------\n\n filename : str\n file to be deleted\n\n \"\"\"\n try:\n os.remove(filename)\n except OSError as e:\n if e.errno != errno.ENOENT:\n raise\n return False\n return True\n\n\ndef which(cmd, env=None, pathext=None):\n \"\"\"\n Return the path to an executable which would be run if the given\n cmd was called. If no cmd would be called, return ``None``.\n\n Code for Python < 3.3 is based on a code snippet from\n http://orip.org/2009/08/python-checking-if-executable-exists-in.html\n\n \"\"\"\n\n if pathext is None:\n pathext = os.getenv(\"PATHEXT\", \"\").split(os.pathsep)\n pathext.insert(0, \"\")\n\n path = os.getenv(\"PATH\", os.defpath)\n if env and \"PATH\" in env:\n path = env.get(\"PATH\")\n\n for ext in pathext:\n filename = shutil.which(cmd + ext, path=path)\n if filename:\n return filename\n return None\n\n\ndef get_dependencies(name, environ):\n \"\"\"Return library dependencies of a dynamically linked executable\n\n Uses otool on darwin, ldd on linux. Currently doesn't support windows.\n\n \"\"\"\n command = None\n if sys.platform == \"darwin\":\n command = \"otool -L `which %s`\" % name\n elif \"linux\" in sys.platform:\n command = \"ldd `which %s`\" % name\n else:\n return \"Platform %s not supported\" % sys.platform\n\n deps = None\n try:\n proc = sp.Popen(\n command, stdout=sp.PIPE, stderr=sp.PIPE, shell=True, env=environ\n )\n o, e = proc.communicate()\n deps = o.rstrip()\n except Exception as ex:\n deps = '\"%s\" failed' % command\n fmlogger.warning(\n \"Could not get dependencies of %s. 
Error:\\n%s\", name, ex.message\n )\n return deps\n\n\ndef canonicalize_env(env):\n \"\"\"Windows requires that environment be dicts with bytes as keys and values\n This function converts any unicode entries for Windows only, returning the\n dictionary untouched in other environments.\n\n Parameters\n ----------\n env : dict\n environment dictionary with unicode or bytes keys and values\n\n Returns\n -------\n env : dict\n Windows: environment dictionary with bytes keys and values\n Other: untouched input ``env``\n \"\"\"\n if os.name != \"nt\":\n return env\n\n out_env = {}\n for key, val in env.items():\n if not isinstance(key, bytes):\n key = key.encode(\"utf-8\")\n if not isinstance(val, bytes):\n val = val.encode(\"utf-8\")\n out_env[key] = val\n return out_env\n\n\ndef relpath(path, start=None):\n \"\"\"Return a relative version of a path\"\"\"\n\n try:\n return op.relpath(path, start)\n except AttributeError:\n pass\n\n if start is None:\n start = os.curdir\n if not path:\n raise ValueError(\"no path specified\")\n start_list = op.abspath(start).split(op.sep)\n path_list = op.abspath(path).split(op.sep)\n if start_list[0].lower() != path_list[0].lower():\n unc_path, rest = op.splitunc(path)\n unc_start, rest = op.splitunc(start)\n if bool(unc_path) ^ bool(unc_start):\n raise ValueError(\n (\"Cannot mix UNC and non-UNC paths \" \"(%s and %s)\") % (path, start)\n )\n else:\n raise ValueError(\n \"path is on drive %s, start on drive %s\" % (path_list[0], start_list[0])\n )\n # Work out how much of the filepath is shared by start and path.\n for i in range(min(len(start_list), len(path_list))):\n if start_list[i].lower() != path_list[i].lower():\n break\n else:\n i += 1\n\n rel_list = [op.pardir] * (len(start_list) - i) + path_list[i:]\n if not rel_list:\n return os.curdir\n return op.join(*rel_list)\n\n\[email protected]\ndef indirectory(path):\n cwd = os.getcwd()\n os.chdir(str(path))\n try:\n yield\n finally:\n os.chdir(cwd)\n",
"path": "nipype/utils/filemanip.py"
}
] | diff --git a/nipype/utils/filemanip.py b/nipype/utils/filemanip.py
index 735cc610b6..46c4cc53be 100644
--- a/nipype/utils/filemanip.py
+++ b/nipype/utils/filemanip.py
@@ -736,7 +736,7 @@ def write_rst_header(header, level=0):
def write_rst_list(items, prefix=""):
out = []
- for item in items:
+ for item in ensure_list(items):
out.append("{} {}".format(prefix, str(item)))
return "\n".join(out) + "\n\n"
diff --git a/nipype/utils/tests/test_filemanip.py b/nipype/utils/tests/test_filemanip.py
index 9c54ff02ee..fed2462548 100644
--- a/nipype/utils/tests/test_filemanip.py
+++ b/nipype/utils/tests/test_filemanip.py
@@ -30,6 +30,7 @@
loadcrash,
savepkl,
path_resolve,
+ write_rst_list,
)
@@ -652,3 +653,17 @@ def test_pickle(tmp_path, save_versioning):
savepkl(pickle_fname, testobj, versioning=save_versioning)
outobj = loadpkl(pickle_fname)
assert outobj == testobj
+
+
[email protected]("items,expected", [
+ ('', ' \n\n'),
+ ('A string', ' A string\n\n'),
+ (['A list', 'Of strings'], ' A list\n Of strings\n\n'),
+ (None, TypeError),
+])
+def test_write_rst_list(tmp_path, items, expected):
+ if items is not None:
+ assert write_rst_list(items) == expected
+ else:
+ with pytest.raises(expected):
+ write_rst_list(items)
|
dbt-labs__dbt-core-1324 | [Stephen Girard] resource list shows sources as None
To be fixed for the 0.13.0 (Stephen Girard) release. An invocation of `dbt run` shows:
```
Found 162 models, 320 tests, 0 archives, 0 analyses, 236 macros, 2 operations, 4 seed files, 34 None
^
|
```
We should also add an assert so that this fails immediately in development (it's easy to miss!)
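
A minimal sketch of the shape of the fix plus the suggested assert — the `NodeType.Source` label and the fail-fast check are illustrations against `print_compile_stats` in `core/dbt/compilation.py`, not necessarily the exact patch:

```
from dbt.utils import NodeType
from dbt.logger import GLOBAL_LOGGER as logger


def print_compile_stats(stats):
    names = {
        NodeType.Model: 'models',
        NodeType.Test: 'tests',
        NodeType.Archive: 'archives',
        NodeType.Analysis: 'analyses',
        NodeType.Macro: 'macros',
        NodeType.Operation: 'operations',
        NodeType.Seed: 'seed files',
        NodeType.Source: 'sources',  # the missing entry that rendered as "34 None"
    }

    results = {k: 0 for k in names.keys()}
    results.update(stats)

    # Fail fast in development if any encountered resource type lacks a label.
    assert all(t in names for t in results), \
        'unlabeled resource types: {}'.format(set(results) - set(names))

    stat_line = ", ".join(
        ["{} {}".format(ct, names.get(t)) for t, ct in results.items()])
    logger.info("Found {}".format(stat_line))
```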
| [
{
"content": "import itertools\nimport os\nimport json\nfrom collections import OrderedDict, defaultdict\nimport sqlparse\n\nimport dbt.utils\nimport dbt.include\nimport dbt.tracking\n\nfrom dbt import deprecations\nfrom dbt.utils import get_materialization, NodeType, is_type\nfrom dbt.linker import Linker\n\nimport dbt.compat\nimport dbt.context.runtime\nimport dbt.contracts.project\nimport dbt.exceptions\nimport dbt.flags\nimport dbt.loader\nimport dbt.config\nfrom dbt.contracts.graph.compiled import CompiledNode, CompiledGraph\n\nfrom dbt.clients.system import write_json\nfrom dbt.logger import GLOBAL_LOGGER as logger\n\ngraph_file_name = 'graph.gpickle'\n\n\ndef print_compile_stats(stats):\n names = {\n NodeType.Model: 'models',\n NodeType.Test: 'tests',\n NodeType.Archive: 'archives',\n NodeType.Analysis: 'analyses',\n NodeType.Macro: 'macros',\n NodeType.Operation: 'operations',\n NodeType.Seed: 'seed files',\n }\n\n results = {k: 0 for k in names.keys()}\n results.update(stats)\n\n stat_line = \", \".join(\n [\"{} {}\".format(ct, names.get(t)) for t, ct in results.items()])\n\n logger.info(\"Found {}\".format(stat_line))\n\n\ndef _add_prepended_cte(prepended_ctes, new_cte):\n for dct in prepended_ctes:\n if dct['id'] == new_cte['id']:\n dct['sql'] = new_cte['sql']\n return\n prepended_ctes.append(new_cte)\n\n\ndef _extend_prepended_ctes(prepended_ctes, new_prepended_ctes):\n for new_cte in new_prepended_ctes:\n _add_prepended_cte(prepended_ctes, new_cte)\n\n\ndef prepend_ctes(model, manifest):\n model, _, manifest = recursively_prepend_ctes(model, manifest)\n\n return (model, manifest)\n\n\ndef recursively_prepend_ctes(model, manifest):\n if model.extra_ctes_injected:\n return (model, model.extra_ctes, manifest)\n\n if dbt.flags.STRICT_MODE:\n # ensure that the cte we're adding to is compiled\n CompiledNode(**model.serialize())\n\n prepended_ctes = []\n\n for cte in model.extra_ctes:\n cte_id = cte['id']\n cte_to_add = manifest.nodes.get(cte_id)\n cte_to_add, new_prepended_ctes, manifest = recursively_prepend_ctes(\n cte_to_add, manifest)\n _extend_prepended_ctes(prepended_ctes, new_prepended_ctes)\n new_cte_name = '__dbt__CTE__{}'.format(cte_to_add.get('name'))\n sql = ' {} as (\\n{}\\n)'.format(new_cte_name, cte_to_add.compiled_sql)\n _add_prepended_cte(prepended_ctes, {'id': cte_id, 'sql': sql})\n\n model.prepend_ctes(prepended_ctes)\n\n manifest.nodes[model.unique_id] = model\n\n return (model, prepended_ctes, manifest)\n\n\nclass Compiler(object):\n def __init__(self, config):\n self.config = config\n\n def initialize(self):\n dbt.clients.system.make_directory(self.config.target_path)\n dbt.clients.system.make_directory(self.config.modules_path)\n\n def compile_node(self, node, manifest, extra_context=None):\n if extra_context is None:\n extra_context = {}\n\n logger.debug(\"Compiling {}\".format(node.get('unique_id')))\n\n data = node.to_dict()\n data.update({\n 'compiled': False,\n 'compiled_sql': None,\n 'extra_ctes_injected': False,\n 'extra_ctes': [],\n 'injected_sql': None,\n })\n compiled_node = CompiledNode(**data)\n\n context = dbt.context.runtime.generate(\n compiled_node, self.config, manifest)\n context.update(extra_context)\n\n compiled_node.compiled_sql = dbt.clients.jinja.get_rendered(\n node.get('raw_sql'),\n context,\n node)\n\n compiled_node.compiled = True\n\n injected_node, _ = prepend_ctes(compiled_node, manifest)\n\n should_wrap = {NodeType.Test, NodeType.Operation}\n if injected_node.resource_type in should_wrap:\n # data tests get wrapped in count(*)\n # 
TODO : move this somewhere more reasonable\n if 'data' in injected_node.tags and \\\n is_type(injected_node, NodeType.Test):\n injected_node.wrapped_sql = (\n \"select count(*) from (\\n{test_sql}\\n) sbq\").format(\n test_sql=injected_node.injected_sql)\n else:\n # don't wrap schema tests or analyses.\n injected_node.wrapped_sql = injected_node.injected_sql\n\n elif is_type(injected_node, NodeType.Archive):\n # unfortunately we do everything automagically for\n # archives. in the future it'd be nice to generate\n # the SQL at the parser level.\n pass\n\n elif(is_type(injected_node, NodeType.Model) and\n get_materialization(injected_node) == 'ephemeral'):\n pass\n\n else:\n injected_node.wrapped_sql = None\n\n return injected_node\n\n def write_graph_file(self, linker, manifest):\n filename = graph_file_name\n graph_path = os.path.join(self.config.target_path, filename)\n linker.write_graph(graph_path, manifest)\n\n def link_node(self, linker, node, manifest):\n linker.add_node(node.unique_id)\n\n for dependency in node.depends_on_nodes:\n if manifest.nodes.get(dependency):\n linker.dependency(\n node.unique_id,\n (manifest.nodes.get(dependency).unique_id))\n else:\n dbt.exceptions.dependency_not_found(node, dependency)\n\n def link_graph(self, linker, manifest):\n for node in manifest.nodes.values():\n self.link_node(linker, node, manifest)\n\n cycle = linker.find_cycles()\n\n if cycle:\n raise RuntimeError(\"Found a cycle: {}\".format(cycle))\n\n def compile(self, manifest):\n linker = Linker()\n\n self.link_graph(linker, manifest)\n\n stats = defaultdict(int)\n\n for node_name, node in itertools.chain(\n manifest.nodes.items(),\n manifest.macros.items()):\n stats[node.resource_type] += 1\n\n self.write_graph_file(linker, manifest)\n print_compile_stats(stats)\n\n return linker\n\n\ndef compile_manifest(config, manifest):\n compiler = Compiler(config)\n compiler.initialize()\n return compiler.compile(manifest)\n\n\ndef compile_node(adapter, config, node, manifest, extra_context):\n compiler = Compiler(config)\n node = compiler.compile_node(node, manifest, extra_context)\n node = _inject_runtime_config(adapter, node, extra_context)\n\n if(node.injected_sql is not None and\n not (dbt.utils.is_type(node, NodeType.Archive))):\n logger.debug('Writing injected SQL for node \"{}\"'.format(\n node.unique_id))\n\n written_path = dbt.writer.write_node(\n node,\n config.target_path,\n 'compiled',\n node.injected_sql)\n\n node.build_path = written_path\n\n return node\n\n\ndef _inject_runtime_config(adapter, node, extra_context):\n wrapped_sql = node.wrapped_sql\n context = _node_context(adapter, node)\n context.update(extra_context)\n sql = dbt.clients.jinja.get_rendered(wrapped_sql, context)\n node.wrapped_sql = sql\n return node\n\n\ndef _node_context(adapter, node):\n return {\n \"run_started_at\": dbt.tracking.active_user.run_started_at,\n \"invocation_id\": dbt.tracking.active_user.invocation_id,\n }\n",
"path": "core/dbt/compilation.py"
}
] | [
{
"content": "import itertools\nimport os\nimport json\nfrom collections import OrderedDict, defaultdict\nimport sqlparse\n\nimport dbt.utils\nimport dbt.include\nimport dbt.tracking\n\nfrom dbt import deprecations\nfrom dbt.utils import get_materialization, NodeType, is_type\nfrom dbt.linker import Linker\n\nimport dbt.compat\nimport dbt.context.runtime\nimport dbt.contracts.project\nimport dbt.exceptions\nimport dbt.flags\nimport dbt.loader\nimport dbt.config\nfrom dbt.contracts.graph.compiled import CompiledNode, CompiledGraph\n\nfrom dbt.clients.system import write_json\nfrom dbt.logger import GLOBAL_LOGGER as logger\n\ngraph_file_name = 'graph.gpickle'\n\n\ndef print_compile_stats(stats):\n names = {\n NodeType.Model: 'models',\n NodeType.Test: 'tests',\n NodeType.Archive: 'archives',\n NodeType.Analysis: 'analyses',\n NodeType.Macro: 'macros',\n NodeType.Operation: 'operations',\n NodeType.Seed: 'seed files',\n NodeType.Source: 'sources',\n }\n\n results = {k: 0 for k in names.keys()}\n results.update(stats)\n\n stat_line = \", \".join(\n [\"{} {}\".format(ct, names.get(t)) for t, ct in results.items()])\n\n logger.info(\"Found {}\".format(stat_line))\n\n\ndef _add_prepended_cte(prepended_ctes, new_cte):\n for dct in prepended_ctes:\n if dct['id'] == new_cte['id']:\n dct['sql'] = new_cte['sql']\n return\n prepended_ctes.append(new_cte)\n\n\ndef _extend_prepended_ctes(prepended_ctes, new_prepended_ctes):\n for new_cte in new_prepended_ctes:\n _add_prepended_cte(prepended_ctes, new_cte)\n\n\ndef prepend_ctes(model, manifest):\n model, _, manifest = recursively_prepend_ctes(model, manifest)\n\n return (model, manifest)\n\n\ndef recursively_prepend_ctes(model, manifest):\n if model.extra_ctes_injected:\n return (model, model.extra_ctes, manifest)\n\n if dbt.flags.STRICT_MODE:\n # ensure that the cte we're adding to is compiled\n CompiledNode(**model.serialize())\n\n prepended_ctes = []\n\n for cte in model.extra_ctes:\n cte_id = cte['id']\n cte_to_add = manifest.nodes.get(cte_id)\n cte_to_add, new_prepended_ctes, manifest = recursively_prepend_ctes(\n cte_to_add, manifest)\n _extend_prepended_ctes(prepended_ctes, new_prepended_ctes)\n new_cte_name = '__dbt__CTE__{}'.format(cte_to_add.get('name'))\n sql = ' {} as (\\n{}\\n)'.format(new_cte_name, cte_to_add.compiled_sql)\n _add_prepended_cte(prepended_ctes, {'id': cte_id, 'sql': sql})\n\n model.prepend_ctes(prepended_ctes)\n\n manifest.nodes[model.unique_id] = model\n\n return (model, prepended_ctes, manifest)\n\n\nclass Compiler(object):\n def __init__(self, config):\n self.config = config\n\n def initialize(self):\n dbt.clients.system.make_directory(self.config.target_path)\n dbt.clients.system.make_directory(self.config.modules_path)\n\n def compile_node(self, node, manifest, extra_context=None):\n if extra_context is None:\n extra_context = {}\n\n logger.debug(\"Compiling {}\".format(node.get('unique_id')))\n\n data = node.to_dict()\n data.update({\n 'compiled': False,\n 'compiled_sql': None,\n 'extra_ctes_injected': False,\n 'extra_ctes': [],\n 'injected_sql': None,\n })\n compiled_node = CompiledNode(**data)\n\n context = dbt.context.runtime.generate(\n compiled_node, self.config, manifest)\n context.update(extra_context)\n\n compiled_node.compiled_sql = dbt.clients.jinja.get_rendered(\n node.get('raw_sql'),\n context,\n node)\n\n compiled_node.compiled = True\n\n injected_node, _ = prepend_ctes(compiled_node, manifest)\n\n should_wrap = {NodeType.Test, NodeType.Operation}\n if injected_node.resource_type in should_wrap:\n # data tests 
get wrapped in count(*)\n # TODO : move this somewhere more reasonable\n if 'data' in injected_node.tags and \\\n is_type(injected_node, NodeType.Test):\n injected_node.wrapped_sql = (\n \"select count(*) from (\\n{test_sql}\\n) sbq\").format(\n test_sql=injected_node.injected_sql)\n else:\n # don't wrap schema tests or analyses.\n injected_node.wrapped_sql = injected_node.injected_sql\n\n elif is_type(injected_node, NodeType.Archive):\n # unfortunately we do everything automagically for\n # archives. in the future it'd be nice to generate\n # the SQL at the parser level.\n pass\n\n elif(is_type(injected_node, NodeType.Model) and\n get_materialization(injected_node) == 'ephemeral'):\n pass\n\n else:\n injected_node.wrapped_sql = None\n\n return injected_node\n\n def write_graph_file(self, linker, manifest):\n filename = graph_file_name\n graph_path = os.path.join(self.config.target_path, filename)\n linker.write_graph(graph_path, manifest)\n\n def link_node(self, linker, node, manifest):\n linker.add_node(node.unique_id)\n\n for dependency in node.depends_on_nodes:\n if manifest.nodes.get(dependency):\n linker.dependency(\n node.unique_id,\n (manifest.nodes.get(dependency).unique_id))\n else:\n dbt.exceptions.dependency_not_found(node, dependency)\n\n def link_graph(self, linker, manifest):\n for node in manifest.nodes.values():\n self.link_node(linker, node, manifest)\n\n cycle = linker.find_cycles()\n\n if cycle:\n raise RuntimeError(\"Found a cycle: {}\".format(cycle))\n\n def compile(self, manifest):\n linker = Linker()\n\n self.link_graph(linker, manifest)\n\n stats = defaultdict(int)\n\n for node_name, node in itertools.chain(\n manifest.nodes.items(),\n manifest.macros.items()):\n stats[node.resource_type] += 1\n\n self.write_graph_file(linker, manifest)\n print_compile_stats(stats)\n\n return linker\n\n\ndef compile_manifest(config, manifest):\n compiler = Compiler(config)\n compiler.initialize()\n return compiler.compile(manifest)\n\n\ndef compile_node(adapter, config, node, manifest, extra_context):\n compiler = Compiler(config)\n node = compiler.compile_node(node, manifest, extra_context)\n node = _inject_runtime_config(adapter, node, extra_context)\n\n if(node.injected_sql is not None and\n not (dbt.utils.is_type(node, NodeType.Archive))):\n logger.debug('Writing injected SQL for node \"{}\"'.format(\n node.unique_id))\n\n written_path = dbt.writer.write_node(\n node,\n config.target_path,\n 'compiled',\n node.injected_sql)\n\n node.build_path = written_path\n\n return node\n\n\ndef _inject_runtime_config(adapter, node, extra_context):\n wrapped_sql = node.wrapped_sql\n context = _node_context(adapter, node)\n context.update(extra_context)\n sql = dbt.clients.jinja.get_rendered(wrapped_sql, context)\n node.wrapped_sql = sql\n return node\n\n\ndef _node_context(adapter, node):\n return {\n \"run_started_at\": dbt.tracking.active_user.run_started_at,\n \"invocation_id\": dbt.tracking.active_user.invocation_id,\n }\n",
"path": "core/dbt/compilation.py"
}
] | diff --git a/core/dbt/compilation.py b/core/dbt/compilation.py
index 9f101c39222..a48a97b41ea 100644
--- a/core/dbt/compilation.py
+++ b/core/dbt/compilation.py
@@ -36,6 +36,7 @@ def print_compile_stats(stats):
NodeType.Macro: 'macros',
NodeType.Operation: 'operations',
NodeType.Seed: 'seed files',
+ NodeType.Source: 'sources',
}
results = {k: 0 for k in names.keys()}
diff --git a/plugins/snowflake/dbt/include/snowflake/macros/adapters.sql b/plugins/snowflake/dbt/include/snowflake/macros/adapters.sql
index 05144c654b8..4e8c5f3fbdd 100644
--- a/plugins/snowflake/dbt/include/snowflake/macros/adapters.sql
+++ b/plugins/snowflake/dbt/include/snowflake/macros/adapters.sql
@@ -81,3 +81,10 @@
{% macro snowflake__current_timestamp() -%}
convert_timezone('UTC', current_timestamp())
{%- endmacro %}
+
+
+{% macro snowflake__rename_relation(from_relation, to_relation) -%}
+ {% call statement('rename_relation') -%}
+ alter table {{ from_relation }} rename to {{ to_relation }}
+ {%- endcall %}
+{% endmacro %}
diff --git a/test/integration/024_custom_schema_test/models/view_3.sql b/test/integration/024_custom_schema_test/models/view_3.sql
index c208e5d32df..33931704248 100644
--- a/test/integration/024_custom_schema_test/models/view_3.sql
+++ b/test/integration/024_custom_schema_test/models/view_3.sql
@@ -1,5 +1,5 @@
-{{ config(schema='test') }}
+{{ config(schema='test', materialized='table') }}
with v1 as (
diff --git a/test/integration/024_custom_schema_test/test_custom_schema.py b/test/integration/024_custom_schema_test/test_custom_schema.py
index 4a64d7e419f..2ca445f4dd5 100644
--- a/test/integration/024_custom_schema_test/test_custom_schema.py
+++ b/test/integration/024_custom_schema_test/test_custom_schema.py
@@ -1,5 +1,5 @@
from nose.plugins.attrib import attr
-from test.integration.base import DBTIntegrationTest
+from test.integration.base import DBTIntegrationTest, use_profile
class TestCustomSchema(DBTIntegrationTest):
@@ -85,6 +85,42 @@ def test__postgres__custom_schema_with_prefix(self):
self.assertTablesEqual("agg","view_3", schema, xf_schema)
+class TestCustomProjectSchemaWithPrefixSnowflake(DBTIntegrationTest):
+
+ @property
+ def schema(self):
+ return "custom_schema_024"
+
+ @property
+ def models(self):
+ return "test/integration/024_custom_schema_test/models"
+
+ @property
+ def project_config(self):
+ return {
+ "models": {
+ "schema": "dbt_test"
+ }
+ }
+
+ @use_profile('snowflake')
+ def test__snowflake__custom_schema_with_prefix(self):
+ self.use_default_project()
+ self.run_sql_file("test/integration/024_custom_schema_test/seed.sql")
+
+ results = self.run_dbt()
+ self.assertEqual(len(results), 3)
+
+ schema = self.unique_schema().upper()
+ v1_schema = "{}_DBT_TEST".format(schema)
+ v2_schema = "{}_CUSTOM".format(schema)
+ xf_schema = "{}_TEST".format(schema)
+
+ self.assertTablesEqual("SEED","VIEW_1", schema, v1_schema)
+ self.assertTablesEqual("SEED","VIEW_2", schema, v2_schema)
+ self.assertTablesEqual("AGG","VIEW_3", schema, xf_schema)
+
+
class TestCustomSchemaWithCustomMacro(DBTIntegrationTest):
@property
diff --git a/test/unit/test_snowflake_adapter.py b/test/unit/test_snowflake_adapter.py
index 0ee65d05759..d97ee2b4c7e 100644
--- a/test/unit/test_snowflake_adapter.py
+++ b/test/unit/test_snowflake_adapter.py
@@ -125,7 +125,7 @@ def test_quoting_on_rename(self):
)
self.mock_execute.assert_has_calls([
mock.call(
- 'alter table "test_database"."test_schema".table_a rename to table_b',
+ 'alter table "test_database"."test_schema".table_a rename to "test_database"."test_schema".table_b',
None
)
])
|
ivy-llc__ivy-13177 | tril_indces_from
| [
{
"content": "# local\nimport ivy\nfrom ivy.functional.frontends.jax.func_wrapper import (\n to_ivy_arrays_and_back,\n)\n\n\n@to_ivy_arrays_and_back\ndef diagonal(a, offset=0, axis1=0, axis2=1):\n return ivy.diagonal(a, offset=offset, axis1=axis1, axis2=axis2)\n\n\n@to_ivy_arrays_and_back\ndef diag(v, k=0):\n return ivy.diag(v, k=k)\n\n\n@to_ivy_arrays_and_back\ndef diag_indices(n, ndim=2):\n idx = ivy.arange(n, dtype=int)\n return (idx,) * ndim\n\n\n# take_along_axis\n@to_ivy_arrays_and_back\ndef take_along_axis(arr, indices, axis, mode=\"fill\"):\n return ivy.take_along_axis(arr, indices, axis, mode=mode)\n\n\n@to_ivy_arrays_and_back\ndef tril_indices(n_rows, n_cols=None, k=0):\n return ivy.tril_indices(n_rows, n_cols, k)\n",
"path": "ivy/functional/frontends/jax/numpy/indexing.py"
}
] | [
{
"content": "# local\nimport ivy\nfrom ivy.functional.frontends.jax.func_wrapper import (\n to_ivy_arrays_and_back,\n)\n\n\n@to_ivy_arrays_and_back\ndef diagonal(a, offset=0, axis1=0, axis2=1):\n return ivy.diagonal(a, offset=offset, axis1=axis1, axis2=axis2)\n\n\n@to_ivy_arrays_and_back\ndef diag(v, k=0):\n return ivy.diag(v, k=k)\n\n\n@to_ivy_arrays_and_back\ndef diag_indices(n, ndim=2):\n idx = ivy.arange(n, dtype=int)\n return (idx,) * ndim\n\n\n# take_along_axis\n@to_ivy_arrays_and_back\ndef take_along_axis(arr, indices, axis, mode=\"fill\"):\n return ivy.take_along_axis(arr, indices, axis, mode=mode)\n\n\n@to_ivy_arrays_and_back\ndef tril_indices(n_rows, n_cols=None, k=0):\n return ivy.tril_indices(n_rows, n_cols, k)\n\n\n@to_ivy_arrays_and_back\ndef tril_indices_from(arr, k=0):\n return ivy.tril_indices(arr.shape[-2], arr.shape[-1], k)\n",
"path": "ivy/functional/frontends/jax/numpy/indexing.py"
}
] | diff --git a/ivy/functional/frontends/jax/numpy/indexing.py b/ivy/functional/frontends/jax/numpy/indexing.py
index 9b0335ebe2cf7..f633d78cda6d5 100644
--- a/ivy/functional/frontends/jax/numpy/indexing.py
+++ b/ivy/functional/frontends/jax/numpy/indexing.py
@@ -30,3 +30,8 @@ def take_along_axis(arr, indices, axis, mode="fill"):
@to_ivy_arrays_and_back
def tril_indices(n_rows, n_cols=None, k=0):
return ivy.tril_indices(n_rows, n_cols, k)
+
+
+@to_ivy_arrays_and_back
+def tril_indices_from(arr, k=0):
+ return ivy.tril_indices(arr.shape[-2], arr.shape[-1], k)
diff --git a/ivy_tests/test_ivy/test_frontends/test_jax/test_jax_numpy_indexing.py b/ivy_tests/test_ivy/test_frontends/test_jax/test_jax_numpy_indexing.py
index ae1dad1c6a9b5..2891acab4f0be 100644
--- a/ivy_tests/test_ivy/test_frontends/test_jax/test_jax_numpy_indexing.py
+++ b/ivy_tests/test_ivy/test_frontends/test_jax/test_jax_numpy_indexing.py
@@ -148,7 +148,7 @@ def test_jax_numpy_diag_indices(
indices_same_dims=True,
valid_bounds=False,
),
- mode=st.sampled_from(['clip', 'fill', 'drop']),
+ mode=st.sampled_from(["clip", "fill", "drop"]),
test_with_out=st.just(False),
)
def test_jax_numpy_take_along_axis(
@@ -200,3 +200,35 @@ def test_jax_numpy_tril_indices(
n_rows=n_rows,
k=k,
)
+
+
+# tril_indices_from
+@handle_frontend_test(
+ fn_tree="jax.numpy.tril_indices_from",
+ dtype_and_x=helpers.dtype_and_values(
+ available_dtypes=helpers.get_dtypes("numeric"),
+ num_arrays=1,
+ min_num_dims=2,
+ max_num_dims=5,
+ ),
+ k=helpers.ints(min_value=-5, max_value=5),
+ test_with_out=st.just(False),
+)
+def test_jax_numpy_tril_indices_from(
+ dtype_and_x,
+ k,
+ test_flags,
+ frontend,
+ fn_tree,
+ on_device,
+):
+ dtype, x = dtype_and_x
+ helpers.test_frontend_function(
+ input_dtypes=dtype,
+ test_flags=test_flags,
+ frontend=frontend,
+ fn_tree=fn_tree,
+ on_device=on_device,
+ arr=x[0],
+ k=k,
+ )
|
ray-project__ray-9429 | [rllib] MARWIL tuned cartpole example (and my own experiments) produce nan rewards only.
### What is the problem? + Reproduction
I have a custom example that produces offline data and picks it up with MARWIL for training. I observed that I get `nan` reward values for my example every time, so I went a step back and used your cartpole example:
https://github.com/ray-project/ray/blob/cd5a207d69cdaf05b47d956c18e89d928585eec7/rllib/tuned_examples/marwil/cartpole-marwil.yaml
I'm following the exact steps there, i.e. first run
```
./train.py --run=PPO --env=CartPole-v0 \
--stop='{"timesteps_total": 50000}' \
--config='{"output": "/tmp/out", "batch_mode": "complete_episodes"}'
```
followed by
```
rllib train -f cartpole-marwil.yaml
```
I did this both on my currently preferred stable version, `0.8.5`, and on the `0.9.0.dev0` wheel. The result is this:
```
== Status ==
Memory usage on this node: 19.4/32.0 GiB
Using FIFO scheduling algorithm.
Resources requested: 0/12 CPUs, 0/0 GPUs, 0.0/9.96 GiB heap, 0.0/3.42 GiB objects
Result logdir: /Users/maxpumperla/ray_results/cartpole-marwil
Number of trials: 2 (2 TERMINATED)
+--------------------------------+------------+-------+--------+--------+------------------+--------+----------+
| Trial name | status | loc | beta | iter | total time (s) | ts | reward |
|--------------------------------+------------+-------+--------+--------+------------------+--------+----------|
| MARWIL_CartPole-v0_7af06_00000 | TERMINATED | | 0 | 2206 | 58.5661 | 500007 | nan |
| MARWIL_CartPole-v0_7af06_00001 | TERMINATED | | 1 | 2248 | 58.6117 | 500286 | nan |
+--------------------------------+------------+-------+--------+--------+------------------+--------+----------+
```
Also, I've noticed that your MARWIL unit test is a pure smoke test and doesn't check reward values, but I didn't run that locally. Maybe it produces nan values as well.
In any case I'd appreciate any input here, as we'd love to use MARWIL for our "real" use case, in which we see the same behaviour.
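
A likely explanation for the `nan`: with `input` pointing at offline JSON data, no live-env episodes are ever sampled, so there is nothing to average into `episode_reward_mean`. A minimal sketch of a workaround — attaching evaluation workers that roll out on the real env — assuming the offline data sits in `/tmp/out` as produced by the PPO command above:

```
import ray
import ray.rllib.agents.marwil as marwil

ray.init()
config = marwil.DEFAULT_CONFIG.copy()
config["num_workers"] = 0
config["input"] = "/tmp/out"                        # offline experiences from the PPO run
config["evaluation_num_workers"] = 1                # periodically roll out on the live env
config["evaluation_interval"] = 1
config["evaluation_config"] = {"input": "sampler"}  # evaluation samples instead of reading files

trainer = marwil.MARWILTrainer(config=config, env="CartPole-v0")
for i in range(10):
    results = trainer.train()
    print(i, results["evaluation"]["episode_reward_mean"])
```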
| [
{
"content": "\"\"\"Example of using custom_loss() with an imitation learning loss.\n\nThe default input file is too small to learn a good policy, but you can\ngenerate new experiences for IL training as follows:\n\nTo generate experiences:\n$ ./train.py --run=PG --config='{\"output\": \"/tmp/cartpole\"}' --env=CartPole-v0\n\nTo train on experiences with joint PG + IL loss:\n$ python custom_loss.py --input-files=/tmp/cartpole\n\"\"\"\n\nimport argparse\nfrom pathlib import Path\nimport os\n\nimport ray\nfrom ray import tune\nfrom ray.rllib.examples.models.custom_loss_model import CustomLossModel, \\\n TorchCustomLossModel\nfrom ray.rllib.models import ModelCatalog\nfrom ray.rllib.utils.framework import try_import_tf\n\ntf1, tf, tfv = try_import_tf()\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--torch\", action=\"store_true\")\nparser.add_argument(\"--stop-iters\", type=int, default=200)\nparser.add_argument(\n \"--input-files\",\n type=str,\n default=os.path.join(\n os.path.dirname(os.path.abspath(__file__)),\n \"../tests/data/cartpole_small\"))\n\nif __name__ == \"__main__\":\n ray.init()\n args = parser.parse_args()\n\n # Bazel makes it hard to find files specified in `args` (and `data`).\n # Look for them here.\n if not os.path.exists(args.input_files):\n # This script runs in the ray/rllib/examples dir.\n rllib_dir = Path(__file__).parent.parent\n input_dir = rllib_dir.absolute().joinpath(args.input_files)\n args.input_files = str(input_dir)\n\n ModelCatalog.register_custom_model(\n \"custom_loss\", TorchCustomLossModel if args.torch else CustomLossModel)\n\n config = {\n \"env\": \"CartPole-v0\",\n \"num_workers\": 0,\n \"model\": {\n \"custom_model\": \"custom_loss\",\n \"custom_model_config\": {\n \"input_files\": args.input_files,\n },\n },\n \"framework\": \"torch\" if args.torch else \"tf\",\n }\n\n stop = {\n \"training_iteration\": args.stop_iters,\n }\n\n tune.run(\"PG\", config=config, stop=stop)\n",
"path": "rllib/examples/custom_loss.py"
}
] | [
{
"content": "\"\"\"Example of using custom_loss() with an imitation learning loss.\n\nThe default input file is too small to learn a good policy, but you can\ngenerate new experiences for IL training as follows:\n\nTo generate experiences:\n$ ./train.py --run=PG --config='{\"output\": \"/tmp/cartpole\"}' --env=CartPole-v0\n\nTo train on experiences with joint PG + IL loss:\n$ python custom_loss.py --input-files=/tmp/cartpole\n\"\"\"\n\nimport argparse\nfrom pathlib import Path\nimport os\n\nimport ray\nfrom ray import tune\nfrom ray.rllib.examples.models.custom_loss_model import CustomLossModel, \\\n TorchCustomLossModel\nfrom ray.rllib.models import ModelCatalog\nfrom ray.rllib.utils.framework import try_import_tf\n\ntf1, tf, tfv = try_import_tf()\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--torch\", action=\"store_true\")\nparser.add_argument(\"--stop-iters\", type=int, default=200)\nparser.add_argument(\n \"--input-files\",\n type=str,\n default=os.path.join(\n os.path.dirname(os.path.abspath(__file__)),\n \"../tests/data/cartpole/small\"))\n\nif __name__ == \"__main__\":\n ray.init()\n args = parser.parse_args()\n\n # Bazel makes it hard to find files specified in `args` (and `data`).\n # Look for them here.\n if not os.path.exists(args.input_files):\n # This script runs in the ray/rllib/examples dir.\n rllib_dir = Path(__file__).parent.parent\n input_dir = rllib_dir.absolute().joinpath(args.input_files)\n args.input_files = str(input_dir)\n\n ModelCatalog.register_custom_model(\n \"custom_loss\", TorchCustomLossModel if args.torch else CustomLossModel)\n\n config = {\n \"env\": \"CartPole-v0\",\n \"num_workers\": 0,\n \"model\": {\n \"custom_model\": \"custom_loss\",\n \"custom_model_config\": {\n \"input_files\": args.input_files,\n },\n },\n \"framework\": \"torch\" if args.torch else \"tf\",\n }\n\n stop = {\n \"training_iteration\": args.stop_iters,\n }\n\n tune.run(\"PG\", config=config, stop=stop)\n",
"path": "rllib/examples/custom_loss.py"
}
] | diff --git a/rllib/BUILD b/rllib/BUILD
index db350f045db7c..cfc050b4d5f2d 100644
--- a/rllib/BUILD
+++ b/rllib/BUILD
@@ -469,7 +469,9 @@ py_test(
py_test(
name = "test_marwil",
tags = ["agents_dir"],
- size = "small",
+ size = "medium",
+ # Include the json data file.
+ data = ["tests/data/cartpole/large.json"],
srcs = ["agents/marwil/tests/test_marwil.py"]
)
@@ -696,12 +698,12 @@ py_test(
tags = ["quick_train", "external_files"],
size = "small",
# Include the json data file.
- data = glob(["tests/data/cartpole_small/**"]),
+ data = ["tests/data/cartpole/small.json"],
args = [
"--env", "CartPole-v0",
"--run", "DQN",
"--stop", "'{\"training_iteration\": 1}'",
- "--config", "'{\"framework\": \"tf\", \"input\": \"tests/data/cartpole_small\", \"learning_starts\": 0, \"input_evaluation\": [\"wis\", \"is\"], \"exploration_config\": {\"type\": \"SoftQ\"}}'"
+ "--config", "'{\"framework\": \"tf\", \"input\": \"tests/data/cartpole\", \"learning_starts\": 0, \"input_evaluation\": [\"wis\", \"is\"], \"exploration_config\": {\"type\": \"SoftQ\"}}'"
]
)
@@ -798,12 +800,12 @@ py_test(
tags = ["quick_train", "external_files"],
size = "small",
# Include the json data file.
- data = glob(["tests/data/cartpole_small/**"]),
+ data = ["tests/data/cartpole/small.json"],
args = [
"--env", "CartPole-v0",
"--run", "MARWIL",
"--stop", "'{\"training_iteration\": 1}'",
- "--config", "'{\"framework\": \"tf\", \"input\": \"tests/data/cartpole_small\", \"learning_starts\": 0, \"input_evaluation\": [\"wis\", \"is\"], \"shuffle_buffer_size\": 10}'"
+ "--config", "'{\"framework\": \"tf\", \"input\": \"tests/data/cartpole\", \"learning_starts\": 0, \"input_evaluation\": [\"wis\", \"is\"], \"shuffle_buffer_size\": 10}'"
]
)
@@ -814,12 +816,12 @@ py_test(
tags = ["quick_train", "external_files"],
size = "small",
# Include the json data file.
- data = glob(["tests/data/cartpole_small/**"]),
+ data = ["tests/data/cartpole/small.json"],
args = [
"--env", "CartPole-v0",
"--run", "MARWIL",
"--stop", "'{\"training_iteration\": 1}'",
- "--config", "'{\"framework\": \"torch\", \"input\": \"tests/data/cartpole_small\", \"learning_starts\": 0, \"input_evaluation\": [\"wis\", \"is\"], \"shuffle_buffer_size\": 10}'"
+ "--config", "'{\"framework\": \"torch\", \"input\": \"tests/data/cartpole\", \"learning_starts\": 0, \"input_evaluation\": [\"wis\", \"is\"], \"shuffle_buffer_size\": 10}'"
]
)
@@ -1649,9 +1651,9 @@ py_test(
tags = ["examples", "examples_C"],
size = "small",
# Include the json data file.
- data = glob(["tests/data/cartpole_small/**"]),
+ data = ["tests/data/cartpole/small.json"],
srcs = ["examples/custom_loss.py"],
- args = ["--stop-iters=2", "--input-files=tests/data/cartpole_small"]
+ args = ["--stop-iters=2", "--input-files=tests/data/cartpole"]
)
py_test(
@@ -1660,9 +1662,9 @@ py_test(
tags = ["examples", "examples_C"],
size = "small",
# Include the json data file.
- data = glob(["tests/data/cartpole_small/**"]),
+ data = ["tests/data/cartpole/small.json"],
srcs = ["examples/custom_loss.py"],
- args = ["--torch", "--stop-iters=2", "--input-files=tests/data/cartpole_small"]
+ args = ["--torch", "--stop-iters=2", "--input-files=tests/data/cartpole"]
)
py_test(
diff --git a/rllib/agents/marwil/tests/test_marwil.py b/rllib/agents/marwil/tests/test_marwil.py
index fa6a9a98d5afd..19dcefeb94a2f 100644
--- a/rllib/agents/marwil/tests/test_marwil.py
+++ b/rllib/agents/marwil/tests/test_marwil.py
@@ -1,3 +1,5 @@
+import os
+from pathlib import Path
import unittest
import ray
@@ -18,19 +20,40 @@ def setUpClass(cls):
def tearDownClass(cls):
ray.shutdown()
- def test_marwil_compilation(self):
- """Test whether a MARWILTrainer can be built with all frameworks."""
+ def test_marwil_compilation_and_learning_from_offline_file(self):
+ """Test whether a MARWILTrainer can be built with all frameworks.
+
+ And learns from a historic-data file.
+ """
+ rllib_dir = Path(__file__).parent.parent.parent.parent
+ print("rllib dir={}".format(rllib_dir))
+ data_file = os.path.join(rllib_dir, "tests/data/cartpole/large.json")
+ print("data_file={} exists={}".format(
+ data_file, os.path.isfile(data_file)))
+
config = marwil.DEFAULT_CONFIG.copy()
config["num_workers"] = 0 # Run locally.
- num_iterations = 2
+ config["evaluation_num_workers"] = 1
+ config["evaluation_interval"] = 1
+ config["evaluation_config"] = {"input": "sampler"}
+ config["input"] = [data_file]
+ num_iterations = 300
# Test for all frameworks.
for _ in framework_iterator(config):
trainer = marwil.MARWILTrainer(config=config, env="CartPole-v0")
for i in range(num_iterations):
- trainer.train()
+ eval_results = trainer.train()["evaluation"]
+ print("iter={} R={}".format(
+ i, eval_results["episode_reward_mean"]))
+ # Learn until some reward is reached on an actual live env.
+ if eval_results["episode_reward_mean"] > 60.0:
+ print("learnt!")
+ break
+
check_compute_single_action(
trainer, include_prev_action_reward=True)
+
trainer.stop()
diff --git a/rllib/examples/custom_loss.py b/rllib/examples/custom_loss.py
index 0f47632d9cae8..b39f407fb6826 100644
--- a/rllib/examples/custom_loss.py
+++ b/rllib/examples/custom_loss.py
@@ -31,7 +31,7 @@
type=str,
default=os.path.join(
os.path.dirname(os.path.abspath(__file__)),
- "../tests/data/cartpole_small"))
+ "../tests/data/cartpole/small"))
if __name__ == "__main__":
ray.init()
diff --git a/rllib/tests/data/cartpole/large.json b/rllib/tests/data/cartpole/large.json
new file mode 100644
index 0000000000000..761ce0e6d7d29
--- /dev/null
+++ b/rllib/tests/data/cartpole/large.json
@@ -0,0 +1,21 @@
+{"type": "SampleBatch", "eps_id": [531374291, 531374291, 531374291, 531374291, 531374291, 531374291, 531374291, 531374291, 531374291, 531374291, 531374291, 531374291, 531374291, 531374291, 531374291, 531374291, 531374291, 531374291, 531374291, 531374291, 531374291, 531374291, 531374291, 531374291, 531374291, 531374291, 531374291, 531374291, 531374291, 531374291, 531374291, 531374291, 531374291, 531374291, 531374291, 531374291, 531374291, 531374291, 531374291, 531374291, 531374291, 531374291, 531374291, 531374291, 531374291, 531374291, 531374291, 531374291, 531374291, 531374291, 531374291, 531374291, 531374291, 531374291, 531374291, 531374291, 531374291, 531374291, 531374291, 531374291, 531374291, 531374291, 531374291, 531374291, 531374291, 531374291, 531374291, 531374291, 531374291, 531374291, 531374291, 531374291, 531374291, 531374291, 531374291, 531374291, 531374291, 531374291, 531374291, 531374291, 531374291, 531374291, 531374291, 531374291, 531374291, 531374291, 531374291, 531374291, 531374291, 531374291, 531374291, 531374291, 531374291, 531374291, 531374291, 531374291, 531374291, 531374291, 531374291, 531374291, 531374291, 531374291, 531374291, 531374291, 531374291, 531374291, 531374291, 531374291, 531374291, 531374291, 531374291, 531374291, 531374291, 531374291, 531374291, 531374291, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294], "obs": 
"BCJNGGhAFg0AAAAAAABQFg0AgIAFlQsNAAAAAAAAjCByYXkuY2xvdWRwaWNrbGUuY2xvdWRwaWNrbGVfZmFzdJSMEV9udW1weV9mcm9tYnVmZmVylJOUKJaADAAAAAAAALLBHb6TDbw8Rph7vL7yuL5YSR2+QxIwvhj7uLwUYJa91M4gvq51wDzIAsW8Bw2/vqdTIL6oYy++1hIBveeDtL2l1SO+/ufGPFBLCL2A/Me+WVYjvjZxLr7ASii9MGTevX7TJr5mz7q+CjAxvQv0Lj5tTC6+qTUtvv8wI72XZgq+QsMxvgI/2TxxQy690Vnhvjk4Mb7HASy+0FFSvU8IJb7mqDS+0oK5vq2FX71ESus9iBQ8vjN2Kr5PHFa9FytHvkt9P741u7i+RgtmvfZjpj3x4Ea+hxkOv29jX71sHbY+JT9Svi7st74CQEK9Hb49PYOaWb7AYCe+h3Q+vX2Zhb5981y+fzy3vsPUU72MA4k81EdkvufyJb4CdlK95GiVvnyZZ76ze7a+1V1qvXAxgbwd5m6+qPgMv5Goa70bQ4Q+NS16vhSltb4cf1a99LpUvaC4gL7swSK+ScBavdSpuL5JWYK+Xd+0vhlMeL3rrq69WveFvvgmDL/bSH+95ipAPoKSi76V3D2/ROlvvQCM6z6xKpO+xZZvv0Q5Sr2k1Ds/FcCcvup4Pb8uHg693hDaPkhUpL4fS2+/Z3TWvNcbNT+l5q2+f0A9vxYXRbxLNNA+lni1vvFBC7+bW3+7ca/hPZUKu77HMT2/pdbduvCkzT7vm8K+sjwLv9vDzzsXEt49uS3IviA2Pb/xaQs8XmTOPj+/z76XSgu/nMCHPC+s5z2XUdW+Z009v0RJmjzDbNI+DOTcvrdrC79Fn9083ZD+Pbd34r6NJLO+x/zxPG6PI77tDOa+G+0evlnR1zzeyeK+x6Pnvh2Ns77VPo88jHwRvhQ7674xygu/aO9vPBrRHz6H0vC+Pcmzvs2JkTyAHwe+CGv0vk8IIL5S1nc8wYTWvrcE9r7XBrS+MhfdO+r4+L10nvm+JFkgvmJrjTvCANO+8Tj7vpQbtL4SqoC7LtHxvRjT/r6a/Qu/ugvOuxGHMT5NNgK/dQi0vjfbOLuLaPi9LwMEvxb1C78466u7LJcuPsTPBr9a+bO+PrnwuiSd/b2AnAi/xh0gvltWjbuyjdW+c2kJv93ts77IV0+8l8sAvhE2C79q3gu/sY54vNnLJj4yAg6/xrmzvr0uQ7ylxQm+S84PvxbFC78HRW+8+w8ePuqZEr8kiLO+jbA8vGZTEr6EZRS/fxcfvo+Da7yI3eC+JzEVv7lXs77Atr2837IavkX8Fr8miQu/NXfWvKVvCT6xxhm/dWM9v9N5wLzlSNY+XZAdv6ZaC789z3e8B7/yPdtZIL9NsrK+W/hQvFcuN75RIyK/U2cdvkXLhbx+ffO+y+wiv/d7sr79tdO8+pZAvre1JL+Ayxy+d4byvMBD+r5pfiW/bRSyvhNOIb1fhlK+TEYnv2nXCr+fJTK9WWaYPSoNKr+5ebG+DA0svU06bb6A0yu/cIcKv3kHP71pdUI9xZguv2jUsL7YIzu9vOOEvnRdML8WMQq/AGdQvU+Qljz/IDO/c/I7v4/lTr3c05Y+SeM2v3m0bb+pwza9xCcSP1akO7/gmTu/nv4HvSlvhz7aZD+/XIgJv3am5Lweex29BSVCv+5gO78R8+q88i97PmbkRb/YOG2/b8LCvHZfBz/6okq/8y87v+I9WLwRNmo+YGFOvz0wCb9NSw28VXWLvccfUb+UGzu/hJsjvOItYz7F3VS/zx4Jv/vRtbsKeZe91JtXvxQNO7+iSua7SSxePodZW78HEwm/oDMwu4aXn71ZF16/AgQ7v5Uri7ugCls+3tRhv4gMCb/3FgI4txGkvY+SZL8TADu/e/HNusiuWT4AUGi/HfJsv4qpLzt0OAE/Kg1tvxUBO79eUVE8fgpaPqDKcL+xGQm/pIuLPPsFm72UiHO/7xw7v4dJfjwYpmM+mUZ3v8Eebb9CkaM8+RQFP6cEfL8IPju/Z734PJEhbz5Vw3+/GE1tvxuADz0SHwk/KEGCv9yvj78XYTs9F9JWP9Yghb8Il22/mQ+APTGqDz8RgYe/hOA7vxwMlz1445M+CGKJv5kTbr/e4KI9uJUaP4LDi7+Jbjy/qZy7PUOqrD7kpY2/wNcKv9hsyT0RK5k9VAmPv/4dPb8Qfcw9zirLPnjtkL/sjwu/7r3cPdd+DD6/UpK/Exe0vppc4j1x/vS9QzmTv4ZaDL88dt09RrhSPpKglL+7rLW+AeTlPWWaUL0diZW/WlwlvvvN4z1ZxZy+8fKVv6mfAj1QQ9c9BtQPvwvelb9zfCi+HEDAPfWtc77gSZa/c8O4vtWAtj3yi6k9XzaXvw6hDr/p5Lk9TO3NPoGjmL++4EC/S17KPTzZOD9FkZq/P08Pv7Hx5z1Akew+JACcv7eeQb+X3vo9icVJP8/vnb8rJhC/lJMNPldRCT/VYJ+/+4O9vtiPGD73npQ+aVOgvwiwNb65gR4+AMZEPbHHoL9wsL++mH0fPoRDxT4OvaG/+SU6vpNhJz7i9hQ+7igFO7Mks7yWAOQ88HQovXD+0DoA/jA+l0PdPFlZpr4ChqU7ppC5vEcIqDy0KcO8o62WO6VLX74LIaQ82u6MPvWceDkSUL68RzrRPJTVNLxslm65oKEvPldrzzzQUpe+e+VRO1mHuz7s/p48YIQUvxN+LDzI+i4+h8f/Oy0VkL5nfGQ8tVW7PgG1DjvYVRK/qzCuPGHVLj7poRe8WHeOvtkpyjzcYrs+ks9yvEvnEr9DEAM98y4vPnRs17zKXJK+BBQRPf7Rwbw8IQO9DV3QutYjDz0yjF++kkIDvQfIjz4qg/o8VW/TvpKC2LzGLxA/a9q2PBCyXr4Tdni8LFGGPs84kzwZYbW8kH8ivLK5D70lmI8800YxPg//Lby8aam+ZvWrPGr5srzYNY28BUUqvQ1hqDyLrjE+aAWUvG7lrb7lzsQ8YN+8Pvyqy7zJUSO/q58APdtNMj6fGBq9sNi0vlbjDj18dqe8GAg3vZu+lL2iNg09tA1cvj37PL3vj1I+3zf3PI6T0b7sIiy9CYH3PmAntDxixFq+KIkEvfULNj6sJpE8rgeUvK7x67yR3P+9wjCOPM7fWb7dNAC9x1QiPkKpVjxFAI28pHDmvCdRE75pBVE8lAFZvr8C/rwuKw8+Q5QLPNkr0L6PGue8G1nYPmcvyznqI1i+U9+hvGQK+D3tQnu7KNPPvnQHjryFo9A+wNJDvPWYV76Ihxa80AzgPUBohLwXos++41zlu/ZfzD6P2ca8SVxXvqrygDoDldU9uU7pvPMeebyfKkk7KZlAvnnM67x2a1e+8G81uq0y2D0IIge9Aq15vMcDujr11D++nmEIvS1uV76TiRi7x6rYPaKdGb16dnm8Ye1dufIfQL7y3Bq9qWRXviXlgbs/B9c9MhgsveGRz74YWPS6FPjKPkJOTb3KTle+4LbGO1NC0z3Dh169i3p5vJ
woBTzAHEC+GMdfvfGDV76KXY87BWvcPdkEcb0tsc++MObVO8upzT71H4m9vKxXvvaSbjzDd+M9eMCRvVfYz74FfIk87w3RPiNhor0NIli+w2HMPDm89z1WBqu9tyXQvmEz4Dxxw9c+Mq27vcflWL5cnxI9GMgMPjpaxL1kFo28keIdPRglE77RDsW9JJU1PgodEj0TE9m+acu9vfTDlbxWw94840v2vR2Lvr22rDQ+Lw/LPPr4zr4BUbe9MNmbvP7TiDwqr9S9fRi4veOGW75xoG88iLZGPnLgwL3Pg5+8gZuXPHR3wL2grMG9xpYzPso1iDyf88K+on26vfmgo7y0phM8OMCpvRRPu727Y1y+avvwO7q6WT7eH8S9kibSvhkqPjxj8wE/we/UvXaqXL4tQLI8Nd1fPl/D3b2Jfam8pxHWPOx3ib1RnN69WV1dvkwSyzwDTG8+GHfnvT1zr7zmW/E8SzJRvatX6L25bTE+uv3oPJUrq77NPuG9WWe8PnY3sjx+Lh6/SyzSvciyMD759Rk84Qqjvucay70p8Lm8XXRGO8jdurznCMy9axdfvlGOKDu0qYo+XPXUvSCYurwc4gI8ymWsvDPk1b3EXDA+hPn3OyRVn75A1s69qWq8vKsdsDrOMIS8bMfPvag7MD6c0IU6++Wdvsy6yL2asby8/Keou34ofLxTrMm9GUswPhW+srsSkJ6+FJ/Cvblzu7z32T68r3qZvAWPw704EF++mP1EvC1eij4we8y95624vM7e2LuMo9a8k2fNvQfRMD6cCuq7flWkvvpUxr1YEbe8xzFevN80+rxNP8e9Kx4xPuUzaLycqqe+lIwFbnVtcHmUjAVkdHlwZZSTlIwCZjSUSwBLAYeUUpQoSwOMATyUTk5OSv////9K/////0sAdJRiS8hLBIaUjAFDlHSUUpQuAAAAAA==", "actions": [0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0], "rewards": [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0], "prev_actions": [1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1], "prev_rewards": [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0], "dones": [false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, true, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false], "new_obs": 
"BCJNGGhAFg0AAAAAAABQFg0AgIAFlQsNAAAAAAAAjCByYXkuY2xvdWRwaWNrbGUuY2xvdWRwaWNrbGVfZmFzdJSMEV9udW1weV9mcm9tYnVmZmVylJOUKJaADAAAAAAAAFhJHb5DEjC+GPu4vBRglr3UziC+rnXAPMgCxbwHDb++p1MgvqhjL77WEgG954O0vaXVI77+58Y8UEsIvYD8x75ZViO+NnEuvsBKKL0wZN69ftMmvmbPur4KMDG9C/QuPm1MLr6pNS2+/zAjvZdmCr5CwzG+Aj/ZPHFDLr3RWeG+OTgxvscBLL7QUVK9TwglvuaoNL7Sgrm+rYVfvURK6z2IFDy+M3Yqvk8cVr0XK0e+S30/vjW7uL5GC2a99mOmPfHgRr6HGQ6/b2NfvWwdtj4lP1K+Luy3vgJAQr0dvj09g5pZvsBgJ76HdD69fZmFvn3zXL5/PLe+w9RTvYwDiTzUR2S+5/IlvgJ2Ur3kaJW+fJlnvrN7tr7VXWq9cDGBvB3mbr6o+Ay/kahrvRtDhD41LXq+FKW1vhx/Vr30ulS9oLiAvuzBIr5JwFq91Km4vklZgr5d37S+GUx4veuurr1a94W++CYMv9tIf73mKkA+gpKLvpXcPb9E6W+9AIzrPrEqk77Flm+/RDlKvaTUOz8VwJy+6ng9vy4eDr3eENo+SFSkvh9Lb79ndNa81xs1P6Xmrb5/QD2/FhdFvEs00D6WeLW+8UELv5tbf7txr+E9lQq7vscxPb+l1t268KTNPu+bwr6yPAu/28PPOxcS3j25Lci+IDY9v/FpCzxeZM4+P7/PvpdKC7+cwIc8L6znPZdR1b5nTT2/REmaPMNs0j4M5Ny+t2sLv0Wf3TzdkP49t3fivo0ks77H/PE8bo8jvu0M5r4b7R6+WdHXPN7J4r7Ho+e+HY2zvtU+jzyMfBG+FDvrvjHKC79o7288GtEfPofS8L49ybO+zYmRPIAfB74Ia/S+TwggvlLWdzzBhNa+twT2vtcGtL4yF9076vj4vXSe+b4kWSC+YmuNO8IA077xOPu+lBu0vhKqgLsu0fG9GNP+vpr9C7+6C867EYcxPk02Ar91CLS+N9s4u4to+L0vAwS/FvULvzjrq7ssly4+xM8Gv1r5s74+ufC6JJ39vYCcCL/GHSC+W1aNu7KN1b5zaQm/3e2zvshXT7yXywC+ETYLv2reC7+xjni82csmPjICDr/GubO+vS5DvKXFCb5Lzg+/FsULvwdFb7z7Dx4+6pkSvySIs76NsDy8ZlMSvoRlFL9/Fx++j4NrvIjd4L4nMRW/uVezvsC2vbzfshq+RfwWvyaJC781d9a8pW8JPrHGGb91Yz2/03nAvOVI1j5dkB2/ploLvz3Pd7wHv/I921kgv02ysr5b+FC8Vy43vlEjIr9TZx2+RcuFvH59877L7CK/93uyvv2107z6lkC+t7Ukv4DLHL53hvK8wEP6vml+Jb9tFLK+E04hvV+GUr5MRie/adcKv58lMr1ZZpg9Kg0qv7l5sb4MDSy9TTptvoDTK79whwq/eQc/vWl1Qj3FmC6/aNSwvtgjO72844S+dF0wvxYxCr8AZ1C9T5CWPP8gM79z8ju/j+VOvdzTlj5J4za/ebRtv6nDNr3EJxI/VqQ7v+CZO7+e/ge9KW+HPtpkP79ciAm/dqbkvB57Hb0FJUK/7mA7vxHz6rzyL3s+ZuRFv9g4bb9vwsK8dl8HP/qiSr/zLzu/4j1YvBE2aj5gYU6/PTAJv01LDbxVdYu9xx9Rv5QbO7+EmyO84i1jPsXdVL/PHgm/+9G1uwp5l73Um1e/FA07v6JK5rtJLF4+h1lbvwcTCb+gMzC7hpefvVkXXr8CBDu/lSuLu6AKWz7e1GG/iAwJv/cWAji3EaS9j5JkvxMAO7978c26yK5ZPgBQaL8d8my/iqkvO3Q4AT8qDW2/FQE7v15RUTx+Clo+oMpwv7EZCb+ki4s8+wWbvZSIc7/vHDu/h0l+PBimYz6ZRne/wR5tv0KRozz5FAU/pwR8vwg+O79nvfg8kSFvPlXDf78YTW2/G4APPRIfCT8oQYK/3K+PvxdhOz0X0lY/1iCFvwiXbb+ZD4A9MaoPPxGBh7+E4Du/HAyXPXjjkz4IYom/mRNuv97goj24lRo/gsOLv4luPL+pnLs9Q6qsPuSljb/A1wq/2GzJPRErmT1UCY+//h09vxB9zD3OKss+eO2Qv+yPC7/uvdw9134MPr9Skr8TF7S+mlziPXH+9L1DOZO/hloMvzx23T1GuFI+kqCUv7ustb4B5OU9ZZpQvR2Jlb9aXCW++83jPVnFnL7x8pW/qZ8CPVBD1z0G1A+/C96Vv3N8KL4cQMA99a1zvuBJlr9zw7i+1YC2PfKLqT1fNpe/DqEOv+nkuT1M7c0+gaOYv77gQL9LXso9PNk4P0WRmr8/Tw+/sfHnPUCR7D4kAJy/t55Bv5fe+j2JxUk/z++dvysmEL+Ukw0+V1EJP9Vgn7/7g72+2I8YPveelD5pU6C/CLA1vrmBHj4AxkQ9scegv3Cwv76YfR8+hEPFPg69ob/5JTq+k2EnPuL2FD4wNKK/BrkuPEZcKj7wabu9cP7QOgD+MD6XQ908WVmmvgKGpTumkLm8RwioPLQpw7yjrZY7pUtfvgshpDza7ow+9Zx4ORJQvrxHOtE8lNU0vGyWbrmgoS8+V2vPPNBSl7575VE7WYe7Puz+njxghBS/E34sPMj6Lj6Hx/87LRWQvmd8ZDy1Vbs+AbUOO9hVEr+rMK48YdUuPumhF7xYd46+2SnKPNxiuz6Sz3K8S+cSv0MQAz3zLi8+dGzXvMpckr4EFBE9/tHBvDwhA70NXdC61iMPPTKMX76SQgO9B8iPPiqD+jxVb9O+koLYvMYvED9r2rY8ELJevhN2eLwsUYY+zziTPBlhtbyQfyK8srkPvSWYjzzTRjE+D/8tvLxpqb5m9as8avmyvNg1jbwFRSq9DWGoPIuuMT5oBZS8buWtvuXOxDxg37w+/KrLvMlRI7+rnwA9200yPp8YGr2w2LS+VuMOPXx2p7wYCDe9m76UvaI2DT20DVy+Pfs8ve+PUj7fN/c8jpPRvuwiLL0Jgfc+YCe0PGLEWr4oiQS99Qs2PqwmkTyuB5S8rvHrvJHc/73CMI48zt9Zvt00AL3HVCI+QqlWPEUAjbykcOa8J1ETvmkFUTyUAVm+vwL+vC4rDz5DlAs82SvQvo8a57wbWdg+Zy/LOeojWL5T36G8ZAr4Pe1Ce7so08++dAeOvIWj0D7A0kO89ZhXvoiHFrzQDOA9QGiEvBeiz77jXOW79l/MPo/ZxrxJXFe+qvKAOgOV1T25Tum88x55vJ8qSTspmUC+eczrvHZrV77wbzW6rTLYPQgiB70CrXm8xwO6OvXUP76eYQi9LW5XvpOJGLvHqtg9op0ZvXp2ebxh7V258h9AvvLcGr2pZFe+JeWBuz8H1z0yGCy94ZHPvhhY9LoU+Mo+Qk5NvcpOV77gtsY7U0LTPcOHXr2Lenm8nCgFPMAcQL4Yx1+98YNXvo
pdjzsFa9w92QRxvS2xz74w5tU7y6nNPvUfib28rFe+9pJuPMN34z14wJG9V9jPvgV8iTzvDdE+I2GivQ0iWL7DYcw8Obz3PVYGq723JdC+YTPgPHHD1z4yrbu9x+VYvlyfEj0YyAw+OlrEvWQWjbyR4h09GCUTvtEOxb0klTU+Ch0SPRMT2b5py7299MOVvFbD3jzjS/a9HYu+vbasND4vD8s8+vjOvgFRt70w2Zu8/tOIPCqv1L19GLi944ZbvnGgbzyItkY+cuDAvc+Dn7yBm5c8dHfAvaCswb3GljM+yjWIPJ/zwr6ifbq9+aCjvLSmEzw4wKm9FE+7vbtjXL5q+/A7urpZPt4fxL2SJtK+GSo+PGPzAT/B79S9dqpcvi1Asjw13V8+X8PdvYl9qbynEdY87HeJvVGc3r1ZXV2+TBLLPANMbz4Yd+e9PXOvvOZb8TxLMlG9q1fovbltMT66/eg8lSurvs0+4b1ZZ7w+djeyPH4uHr9LLNK9yLIwPvn1GTzhCqO+5xrLvSnwubxddEY7yN26vOcIzL1rF1++UY4oO7Spij5c9dS9IJi6vBziAjzKZay8M+TVvcRcMD6E+fc7JFWfvkDWzr2pary8qx2wOs4whLxsx8+9qDswPpzQhTr75Z2+zLrIvZqxvLz8p6i7fih8vFOsyb0ZSzA+Fb6yuxKQnr4Un8K9uXO7vPfZPryvepm8BY/DvTgQX76Y/US8LV6KPjB7zL3nrbi8zt7Yu4yj1ryTZ829B9EwPpwK6rt+VaS++lTGvVgRt7zHMV683zT6vE0/x70rHjE+5TNovJyqp76dKcC99NGzvC/Bqbwv8iC9lIwFbnVtcHmUjAVkdHlwZZSTlIwCZjSUSwBLAYeUUpQoSwOMATyUTk5OSv////9K/////0sAdJRiS8hLBIaUjAFDlHSUUpQuAAAAAA==", "action_prob": [0.8949677348136902, 0.2854820489883423, 0.9052359461784363, 0.24984733760356903, 0.9164604544639587, 0.7893484234809875, 0.5310271382331848, 0.18627631664276123, 0.9338834285736084, 0.8487390875816345, 0.40080294013023376, 0.8715166449546814, 0.658420979976654, 0.689258873462677, 0.3081173002719879, 0.8998115062713623, 0.26141199469566345, 0.913749635219574, 0.7835085988044739, 0.5238140225410461, 0.1894565224647522, 0.9329298734664917, 0.8453688025474548, 0.6032693982124329, 0.27685293555259705, 0.893722414970398, 0.2753779888153076, 0.8976496458053589, 0.7433987259864807, 0.5804827213287354, 0.7586576342582703, 0.5516151785850525, 0.7794906497001648, 0.5118468999862671, 0.8042495846748352, 0.538456916809082, 0.24315714836120605, 0.9007118940353394, 0.748458981513977, 0.5794850587844849, 0.27144572138786316, 0.8901560306549072, 0.27271538972854614, 0.8910347819328308, 0.7340371608734131, 0.5822507739067078, 0.7293457388877869, 0.5845097899436951, 0.27595728635787964, 0.8879104852676392, 0.7372824549674988, 0.5629269480705261, 0.7396020293235779, 0.5552812218666077, 0.25820234417915344, 0.8933025002479553, 0.7614201307296753, 0.48816370964050293, 0.7862693667411804, 0.5133485794067383, 0.2381698489189148, 0.897735059261322, 0.2182164043188095, 0.9072064757347107, 0.8073776960372925, 0.4133623540401459, 0.8243219256401062, 0.3771069347858429, 0.8413662314414978, 0.6608332395553589, 0.3812047839164734, 0.8311269283294678, 0.6087785363197327, 0.6787657141685486, 0.4062293469905853, 0.8176510334014893, 0.5982443690299988, 0.6696091890335083, 0.5969210267066956, 0.6648812294006348, 0.5974056124687195, 0.6586463451385498, 0.5995053648948669, 0.6510249972343445, 0.39692065119743347, 0.8157919049263, 0.6219735145568848, 0.6127027869224548, 0.36448463797569275, 0.8313741683959961, 0.337843656539917, 0.15515053272247314, 0.9258947372436523, 0.8647976517677307, 0.25696811079978943, 0.8831287026405334, 0.784720778465271, 0.3644827902317047, 0.8195744752883911, 0.6923335194587708, 0.4548424482345581, 0.7395390868186951, 0.6029321551322937, 0.47271326184272766, 0.6373288035392761, 0.4909321963787079, 0.3159151077270508, 0.16873405873775482, 0.911851704120636, 0.14181312918663025, 0.9227877855300903, 0.882472813129425, 0.8231956362724304, 0.25083988904953003, 0.8550863265991211, 0.7966943383216858, 0.5455768704414368, 0.7930395007133484, 0.43641868233680725, 0.8619645237922668, 0.6047736406326294, 0.24368080496788025, 0.9141198992729187, 0.2317906618118286, 0.9197134375572205, 0.20742760598659515, 0.9279178977012634, 0.8253203630447388, 0.5318312048912048, 
0.19953255355358124, 0.9282232522964478, 0.8053845763206482, 0.45844629406929016, 0.8473955392837524, 0.425632119178772, 0.13817322254180908, 0.9456128478050232, 0.8870267868041992, 0.6990184783935547, 0.3420375883579254, 0.8814630508422852, 0.6405285000801086, 0.7468997836112976, 0.6068054437637329, 0.7705122232437134, 0.42884406447410583, 0.8519783020019531, 0.4350297152996063, 0.8528088331222534, 0.42816853523254395, 0.8586954474449158, 0.5914314985275269, 0.7693524956703186, 0.5925441980361938, 0.7694152593612671, 0.5915730595588684, 0.7708016633987427, 0.41154390573501587, 0.8666397929191589, 0.6139383912086487, 0.7519904375076294, 0.378618448972702, 0.8802904486656189, 0.3438532054424286, 0.893405556678772, 0.29963135719299316, 0.9077726602554321, 0.7506021857261658, 0.4040895104408264, 0.8543979525566101, 0.4241299331188202, 0.8495427966117859, 0.5679969787597656, 0.7946709990501404, 0.4594833254814148, 0.836846113204956, 0.541279137134552, 0.19262954592704773, 0.9331842660903931, 0.8330914974212646, 0.45803844928741455, 0.8505011200904846, 0.5852563977241516, 0.23887963593006134, 0.9138653874397278, 0.773739218711853, 0.4239019453525543, 0.8591747879981995, 0.5933403372764587, 0.7661020755767822, 0.5868037343025208, 0.7736600041389465, 0.5719375610351562, 0.7861037254333496, 0.45181483030319214, 0.8443316221237183, 0.5466866493225098, 0.8014476299285889, 0.5193928480148315, 0.8187002539634705], "advantages": [0.21745966374874115, -1.3587554693222046, 0.2890452742576599, -1.3989312648773193, 0.5305141806602478, -1.250394582748413, -2.47453236579895, -1.1006877422332764, 1.2681771516799927, -0.5738205313682556, -2.072775363922119, -0.05673271417617798, -1.6238951683044434, -2.815504789352417, -1.2751059532165527, 0.8723791837692261, -0.6053474545478821, 1.4760985374450684, 0.1886976808309555, -1.1512235403060913, 0.7168366312980652, 2.4015207290649414, 1.5091012716293335, 0.5575752854347229, -0.7179286479949951, -2.1994543075561523, -0.5006898045539856, -2.078684091567993, -0.5276636481285095, 0.3403717279434204, -0.5139458775520325, 0.07105739414691925, -0.6441061496734619, -0.31162527203559875, -0.9114060997962952, -0.7999306321144104, -0.9937840700149536, -0.7982866764068604, -1.1026670932769775, -1.1428343057632446, -1.4890213012695312, -1.4131602048873901, -1.5820198059082031, -1.516117811203003, -1.6305266618728638, -1.5550901889801025, -1.9545233249664307, -1.8626054525375366, -2.2881417274475098, -2.2834410667419434, -2.306039571762085, -2.234785556793213, -2.5949575901031494, -2.542801856994629, -2.884005546569824, -2.8175363540649414, -2.847313165664673, -2.884158134460449, -3.3757777214050293, -3.433220863342285, -3.6368627548217773, -3.441377878189087, -3.580565929412842, -3.222698211669922, -3.444593906402588, -3.7557919025421143, -3.542888879776001, -3.941218376159668, -3.5916621685028076, -4.086845874786377, -5.003336429595947, -6.441424369812012, -5.719454288482666, -5.232696533203125, -6.273830890655518, -7.807031154632568, -7.120766639709473, -6.63323450088501, -7.815937519073486, -7.302456378936768, -8.561629295349121, -8.027447700500488, -9.35847282409668, -8.809571266174316, -10.206872940063477, -11.984430313110352, -11.363447189331055, -10.877972602844238, -12.373501777648926, -14.212943077087402, -13.69473648071289, -15.550026893615723, -17.74781036376953, -17.18828773498535, -16.90947723388672, -18.776329040527344, -18.59465217590332, -18.61871337890625, -20.198728561401367, -20.302513122558594, -20.4595890045166, -21.88591194152832, -22.124610900878906, 
-22.25788116455078, -22.106473922729492, -23.6201171875, -25.072233200073242, -26.655244827270508, -28.601224899291992, -28.853046417236328, -30.872180938720703, -31.212308883666992, -31.802932739257812, -32.508113861083984, -34.050262451171875, -34.85260772705078, 16.146289825439453, 15.883732795715332, 15.914982795715332, 16.893966674804688, 15.654492378234863, 15.357436180114746, 15.64351749420166, 15.129483222961426, 15.430910110473633, 14.906143188476562, 15.246929168701172, 14.690457344055176, 14.721792221069336, 15.656871795654297, 17.956584930419922, 15.197473526000977, 14.025788307189941, 13.805998802185059, 13.732562065124512, 13.539511680603027, 14.07516098022461, 13.330644607543945, 13.174162864685059, 13.762556076049805, 15.580601692199707, 13.255160331726074, 12.447029113769531, 12.825960159301758, 12.111981391906738, 12.403304100036621, 13.778947830200195, 11.94132137298584, 13.174569129943848, 11.498538970947266, 12.612510681152344, 11.069765090942383, 10.65079116821289, 10.683614730834961, 10.3037109375, 10.296500205993652, 9.956096649169922, 9.908475875854492, 10.686444282531738, 9.485438346862793, 9.20966911315918, 9.094189643859863, 9.749462127685547, 8.668691635131836, 9.283961296081543, 8.241490364074707, 8.84189510345459, 7.812507152557373, 7.534442901611328, 7.739901065826416, 7.127336502075195, 7.329253196716309, 6.721649646759033, 6.588075160980225, 6.28385591506958, 6.476897716522217, 5.866865158081055, 5.7219438552856445, 6.298417091369629, 5.243937969207764, 4.915683746337891, 4.780351161956787, 4.428664684295654, 4.520269393920898, 5.244883060455322, 4.0799174308776855, 3.508349895477295, 3.3825762271881104, 3.0049028396606445, 3.100775718688965, 2.517244577407837, 2.624274969100952, 2.0264055728912354, 2.1551806926727295, 1.5343471765518188, 1.3545794486999512, 1.0044246912002563, 1.1741865873336792, 0.4995787739753723, 0.7142715454101562]}
+{"type": "SampleBatch", "eps_id": [1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 1857488294, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220], "obs": 
"BCJNGGhAFg0AAAAAAABQFg0AgIAFlQsNAAAAAAAAjCByYXkuY2xvdWRwaWNrbGUuY2xvdWRwaWNrbGVfZmFzdJSMEV9udW1weV9mcm9tYnVmZmVylJOUKJaADAAAAAAAAJ0pwL300bO8L8GpvC/yIL3JD8G90PhdvkUxsLzts3w+x/DJvf/frryQwoe8D3xXvZ7Qyr1NLzI+IGGQvEtxs74BsMO9KN+qvA/Nybzl14G9uYrEvW7MMj5AMNS8uz26vtNjvb0+9aS8kOQHvSSBor34Nr69A+xbvpxkDr3cjE8++ALHvSkCnbz1k/u8BVrOvfHLx71/91q+BQsGvUNzOj4pjtC9XZaVvApB7ryiSPe9ok3RvYQQWr60BAG9tYYmPp0G2r13gI68f2TnvAsvD74Evdq9IjFZvk1N/rxhQxM+EG3jvY9D0L5nvea8smPaPk4W9L2EU1i+8NqgvPgdAD59vfy9humBvENbjLzw3TG+xmP9veDJV76y0Ki83ILoPbkCA756pM++3TaWvNedzD78UAu+/TdXvlp5KbwIVM89554PvmRvz74wTQi8dAHIPgrrF74K8la+GMQEuppCwz2PNxy+cCFyvM6MtzqmPUq+C4UcvmL1Vr7eFye7SdbDPaHRIL5z3HG8tgcnuoScSr4GHyG+qbI4PtCMlrsiQ/u+X20dvialcLw2FWy8sk1MvmG6Hb6UoVa+4LqWvHlttT1KBSK+y/povD03iLwa4Va+2E8ivmAeVr6ymKq8zdGePSGYJr5zO2C8EuSdvEnzYr7j3ya+LYhVvvUzwryp74Q9KyUrvoh9zr5skbe83zWzPqFnM75h21S+AnF8vJxCTj11qTe+/0RNvM3wa7xEFH2+Jes3vt4uOz6Gdp68ulkLv8QsNL63aEW8taX3vOz/g77wazS+HbBTvpDxEL0xUM48x6c4vnIdNrxm4Q699pCOvg7iOL4BqVK+6rAlvSeYRjujGD2+hezMvl1xJb2Iu5A+D0tFvj96Ub4eSQ699wO4vJV7Sb6/4xK8MiAQvWncpr6Wqkm+cXRQvtPSKr29WDa94NVNvrXPy75weC69flpwPuf8Vb7XsRe//z0bvcx/Az+cH2K+cznLvipT4rxqT1Y+oUBqvqZ1F78FCcC8X3v8PoVedr782cq+ZntevGrJRT64e36+sbNNvrUwH7zhjNS9dUyBvnKuyr7EMkG860Y+PjBahb4jY02+Rk8EvFBu4r36Z4e+TonKvuKJKLws3zc+93SLvmsuF784Ztu7dh7wPg+Bkb6Aacq+8+cvO5xgMj5pjZW+xioXv1Mdyjuree8+XJmbvgR6yr5KUn48kj01Pgumn74aYU2+wiicPJvS4r3Qs6G+BoC7u24Dijz9csu+0MKhvqLnTb6+0RE8IprLve7Ro74Wase7Z3ziO0pPx77i4aO+5yJOvkgLZbqwYMG9l/GlvtX2yr7SBTW7b7xKPsQAqr5KFU6+ePWcOnK5w71XEKy+6vPKvgAjO7pUO0o+dR+wvhsXTr6NElQ7g2nDvQ0vsr6j+Mq+cwSuOn4LSz5DPra+NihOvv1zrTtud8C9Bk64vgUFy75Cul876y1NPnxdvL7WSE6+ty3zO6/Yur2Tbb6+SBnLvkJjtzsWrFA+cH3Cvpx5Tr4OeB48onGyvQSOxL4hCNq7/+oBPOvkwL51n8S+l7tOvpbmzjk+EKe9sbDGvsng3bu0HaK6Ro6/vnLCxr52uE6+NdwOvI2bp72m08i+uzLLvmutKbzOElU+BuTMvv9wTr7h/Mq72euzvYP0zr4cetG7BEgCvNPUw75FBc++CDxOvgadf7wrEb29OxXRvkjnyr6b7o68hxdIPhkk1b6EVhe/rtVdvD0O9z7MMdu+Tq3KvlvgfrvbED4+gT/fvgd+Tb5AeTm5VMrdvZBN4b4Y6rW7tYkZu29Uzb4eXOG+onRNvqrLKbxSa9+9FWrjvt0BsLvqik28jV/Pvip4477jHk2+eSGpvHw77r1GheW+FZ2gu3kwvLyztdS+H5Llvqp7TL7UIAC9zjcFvpmd575W6Mm+IskKvco3HD5dp+u+1sYWv5ST/Lx/ZN4+Uq/xvuCcSL8uabW8Ppg3P5i1+b7ae3q/GqT/u69bgD9G3QG/kIFIv73GSDw1KjU/3d8Fv1GZFr9sVdg8w3bWPu7iCL/vfcm+J3sOPSndCT7A5gq/2bRLvpuCGT2zZha+f+sLvxgFyr5keg09ECshPqrwDb8gwky+HV8aPbtb/r3C9g6/Dtmwu3wyED1LPM++1f0Ov/TSTb48FN48qjrPvUkFEL8FAsu+L4DNPB+5TD78DBK/XJZOvqRB7jxUjK29axUTvztny75gX+A8ri5ePiEeFb/OaU+++/UBPesZib2eJxa/oNXLviL0+DxvO3E+bzEYv2VSUL6Axg89zwJCvRY8Gb/UT8y+KuULPUUqgz4gRxu/slVRvrDhID00MNG8E1Mcv34OIrwqyh4963OcvgpgHL/+eVK+3MEFPaL5cLpzbR2/rakyvJSuBT129ZC+vnsdv4YlPD4i+tw83LkQv+qKHL/ZxsE+OVqAPElyWb/Ymhq/kIg7PjkDrbrZRA2/bzD4u/I/rzsa5wU9ld1EvCiv9LvbykK+HesEPbTjlD75rDi8ksGQO6C9HD1krgs8Zjo3vOm+Sz5rcB098C6LvhcP7LsdsVk7eCsHPaf8BT3M4em7ybRKPnvZCT1NXX++1UxQu65FGzs81+o87A9cPdcxTbtgzEk+rKTzPGpKa77EbVQ645PIPir/zTxzTwO/gqUNPEr/SD6O6nM8j4dZvjP3TTwoSHo6iU4uPJDlrT1LR048759IPlkhSjxvTVG+PD2HPJg1JDpKJwc8Nrq8PYJXhzwDU0g+kFklPLyqSr7KZKc8cwK/OSz+yDvRkMg9EnSnPF4WSD5ClgQ8tm9FvqZ3xzzq4sc+itCKO1pC974stwM9LOhHPlutsbtsckG+RLUTPWxFOjnavRa87AHRPf24Ez2afEe+z5nqu7KcyD6BwwM9lcfXORd2MTqxbcY9IswDPWU3SD4LXCs7OEZIvpHQEz3O/cc+rPupumKR+b430DM9XjJIPqr4NLzp2Ee+PtRDPW4wDjoo7HS80IrAPZ7fQz3eBke+oB1WvC2Owz6M8zM9lC53OqvrsbtWba49Uwc0PYbZSD4zNXS7Lj5WvroYRD2+Wsg+FJwBvLLJAL8/J2Q95gRJPqU6k7z6AVq+Hjx0PYuKyD49HLa8ut0CvyQpij3xmkk+rO4EvcUDZ76UOZI9D5USO9tpF70gIGg9CFGSPc7zRL5fxRK9T8isPj1wij0Ov1Y7Z0DuvAULCj2Zkoo9IpNLPti66LwOOYm+NLeSPV1KhjsSUgq9vS9/PC7ikj31ekw+bgsJvSE8k74OEJs9PMKlOyyaIL1MPbi7GUWbPZyJTT4WECG9m+6evs59oz1J8so+8X06vf1/Hb8lurM9x8ROPlfkbL2qqay+df+7PTdO+ztNQoS907SBveBPvD0M8j6+ZtqGvXtLVT6YrLQ9SR0cPI
WkfL3k1NW9ghC1PfAWPb4UmYK9yzosPjuArT2e88G+5Gp3vV5U4T4Z/J09VEc7vmVdU73OGAQ+Xn6WPQMlwb4MzEi97FfPPsQKhz0Kzzm+Q58nvVgmxz0rOH89oX7AvvinH72v48A+lmtgPe+jOL44ywC9+YCTPSemUT0NyHk8j8n1vCTxa77f5VI93b03vt/EDb26r1c92DJEPbmCv76OdAm9Ph+rPpGOJT2Cvja+yybcvA0s/zz47xY9eo6LPFAM17wTL4q+O1UYPYL3Nb4rogG9C+hrPI3GCT11pb6+NXQAvUEHmD5bi9Y8cAs1vkJC0LzTb7O7xJO5PMU7vr7vJ9G8auOOPq5neTzu+BC/hm6jvCdIED+HXH87utlCv8YuDrwslVk/e5E5vIjeEL+7Ugg85fUNPx+Aubx75b2+LgSfPGBqhz5vRPa870Y0vmtZyjxwV7S8S44JvcI3vr4SvsY8AYWOPp39J71c/jS+RVn0PGZz17tdeDa9Y+KSPH5F8zyGU5S+VgA1vWPdNb6bzsM8Cd9HPO6MQ72ipYw8R87FPKuyi77fJEK9cZE2vjkamTwFEuA83r9QvXaohzx3lZ086MyEvpVkT72THze+6CxmPIMFIT3zCl692JC/vqAOczzXO6w+fLF8vcyLN760pLA8TFpGPUGwhb2R1L++1ZO4PDkXsj7wCJW99jA4vgaR8Tz0V389D2ecvUI1wL6+x/s8nXW6PnvHq71uEjm+QLkbPT2Xpj2eLrO9+3diPCRjIj2a4ku+rZ2yvSU1Or6SExI9LLXYPXEQur3H5FA8qL4aPe+dM77Airm9/0c7vhpgDD2aCwQ+lIwFbnVtcHmUjAVkdHlwZZSTlIwCZjSUSwBLAYeUUpQoSwOMATyUTk5OSv////9K/////0sAdJRiS8hLBIaUjAFDlHSUUpQuAAAAAA==", "actions": [0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0], "rewards": [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0], "prev_actions": [0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0], "prev_rewards": [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0], "dones": [false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, true, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false], "new_obs": 
"BCJNGGhAFg0AAAAAAABQFg0AgIAFlQsNAAAAAAAAjCByYXkuY2xvdWRwaWNrbGUuY2xvdWRwaWNrbGVfZmFzdJSMEV9udW1weV9mcm9tYnVmZmVylJOUKJaADAAAAAAAAMkPwb3Q+F2+RTGwvO2zfD7H8Mm9/9+uvJDCh7wPfFe9ntDKvU0vMj4gYZC8S3GzvgGww70o36q8D83JvOXXgb25isS9bswyPkAw1Ly7Pbq+02O9vT71pLyQ5Ae9JIGivfg2vr0D7Fu+nGQOvdyMTz74Ase9KQKdvPWT+7wFWs698cvHvX/3Wr4FCwa9Q3M6PimO0L1dlpW8CkHuvKJI972iTdG9hBBavrQEAb21hiY+nQbavXeAjrx/ZOe8Cy8PvgS92r0iMVm+TU3+vGFDEz4QbeO9j0PQvme95ryyY9o+Thb0vYRTWL7w2qC8+B0APn29/L2G6YG8Q1uMvPDdMb7GY/294MlXvrLQqLzcgug9uQIDvnqkz77dNpa8153MPvxQC779N1e+WnkpvAhUzz3nng++ZG/PvjBNCLx0Acg+CusXvgryVr4YxAS6mkLDPY83HL5wIXK8zoy3OqY9Sr4LhRy+YvVWvt4XJ7tJ1sM9odEgvnPccby2Bye6hJxKvgYfIb6psjg+0IyWuyJD+75fbR2+JqVwvDYVbLyyTUy+YbodvpShVr7gupa8eW21PUoFIr7L+mi8PTeIvBrhVr7YTyK+YB5WvrKYqrzN0Z49IZgmvnM7YLwS5J28SfNivuPfJr4tiFW+9TPCvKnvhD0rJSu+iH3OvmyRt7zfNbM+oWczvmHbVL4CcXy8nEJOPXWpN77/RE28zfBrvEQUfb4l6ze+3i47PoZ2nry6WQu/xCw0vrdoRby1pfe87P+DvvBrNL4dsFO+kPEQvTFQzjzHpzi+ch02vGbhDr32kI6+DuI4vgGpUr7qsCW9J5hGO6MYPb6F7My+XXElvYi7kD4PS0W+P3pRvh5JDr33A7i8lXtJvr/jErwyIBC9adymvpaqSb5xdFC+09Iqvb1YNr3g1U2+tc/LvnB4Lr1+WnA+5/xVvtexF7//PRu9zH8DP5wfYr5zOcu+KlPivGpPVj6hQGq+pnUXvwUJwLxfe/w+hV52vvzZyr5me168aslFPrh7fr6xs02+tTAfvOGM1L11TIG+cq7KvsQyQbzrRj4+MFqFviNjTb5GTwS8UG7ivfpnh75Oicq+4okovCzfNz73dIu+ay4Xvzhm27t2HvA+D4GRvoBpyr7z5y87nGAyPmmNlb7GKhe/Ux3KO6t57z5cmZu+BHrKvkpSfjySPTU+C6afvhphTb7CKJw8m9LivdCzob4GgLu7bgOKPP1yy77QwqG+oudNvr7RETwimsu97tGjvhZqx7tnfOI7Sk/HvuLho77nIk6+SAtlurBgwb2X8aW+1fbKvtIFNbtvvEo+xACqvkoVTr549Zw6crnDvVcQrL7q88q+ACM7ulQ7Sj51H7C+GxdOvo0SVDuDacO9DS+yvqP4yr5zBK46fgtLPkM+tr42KE6+/XOtO253wL0GTri+BQXLvkK6XzvrLU0+fF28vtZITr63LfM7r9i6vZNtvr5IGcu+QmO3OxasUD5wfcK+nHlOvg54HjyicbK9BI7EviEI2rv/6gE86+TAvnWfxL6Xu06+lubOOT4Qp72xsMa+yeDdu7QdorpGjr++csLGvna4Tr413A68jZunvabTyL67Msu+a60pvM4SVT4G5My+/3BOvuH8yrvZ67O9g/TOvhx60bsESAK809TDvkUFz74IPE6+Bp1/vCsRvb07FdG+SOfKvpvujryHF0g+GSTVvoRWF7+u1V28PQ73Pswx275Orcq+W+B+u9sQPj6BP9++B35NvkB5OblUyt29kE3hvhjqtbu1iRm7b1TNvh5c4b6idE2+qsspvFJr370VauO+3QGwu+qKTbyNX8++KnjjvuMeTb55Iam8fDvuvUaF5b4VnaC7eTC8vLO11L4fkuW+qntMvtQgAL3ONwW+mZ3nvlboyb4iyQq9yjccPl2n677Wxha/lJP8vH9k3j5Sr/G+4JxIvy5ptbw+mDc/mLX5vtp7er8apP+7r1uAP0bdAb+QgUi/vcZIPDUqNT/d3wW/UZkWv2xV2DzDdtY+7uIIv+99yb4new49Kd0JPsDmCr/ZtEu+m4IZPbNmFr5/6wu/GAXKvmR6DT0QKyE+qvANvyDCTL4dXxo9u1v+vcL2Dr8O2bC7fDIQPUs8z77V/Q6/9NJNvjwU3jyqOs+9SQUQvwUCy74vgM08H7lMPvwMEr9clk6+pEHuPFSMrb1rFRO/O2fLvmBf4DyuLl4+IR4Vv85pT7779QE96xmJvZ4nFr+g1cu+IvT4PG87cT5vMRi/ZVJQvoDGDz3PAkK9FjwZv9RPzL4q5Qs9RSqDPiBHG7+yVVG+sOEgPTQw0bwTUxy/fg4ivCrKHj3rc5y+CmAcv/55Ur7cwQU9ovlwunNtHb+tqTK8lK4FPXb1kL6+ex2/hiU8PiL63DzcuRC/6oocv9nGwT45WoA8SXJZv9iaGr+QiDs+OQOtutlEDb/Nqhm/5QlEvGhzSry05YS+KK/0u9vKQr4d6wQ9tOOUPvmsOLySwZA7oL0cPWSuCzxmOje86b5LPmtwHT3wLou+Fw/sux2xWTt4Kwc9p/wFPczh6bvJtEo+e9kJPU1df77VTFC7rkUbOzzX6jzsD1w91zFNu2DMST6spPM8akprvsRtVDrjk8g+Kv/NPHNPA7+CpQ08Sv9IPo7qczyPh1m+M/dNPChIejqJTi48kOWtPUtHTjzvn0g+WSFKPG9NUb48PYc8mDUkOkonBzw2urw9gleHPANTSD6QWSU8vKpKvspkpzxzAr85LP7IO9GQyD0SdKc8XhZIPkKWBDy2b0W+pnfHPOrixz6K0Io7WkL3viy3Az0s6Ec+W62xu2xyQb5EtRM9bEU6Odq9FrzsAdE9/bgTPZp8R77Pmeq7spzIPoHDAz2Vx9c5F3YxOrFtxj0izAM9ZTdIPgtcKzs4Rki+kdATPc79xz6s+6m6YpH5vjfQMz1eMkg+qvg0vOnYR74+1EM9bjAOOijsdLzQisA9nt9DPd4GR76gHVa8LY7DPozzMz2ULnc6q+uxu1Ztrj1TBzQ9htlIPjM1dLsuPla+uhhEPb5ayD4UnAG8sskAvz8nZD3mBEk+pTqTvPoBWr4ePHQ9i4rIPj0ctry63QK/JCmKPfGaST6s7gS9xQNnvpQ5kj0PlRI722kXvSAgaD0IUZI9zvNEvl/FEr1PyKw+PXCKPQ6/VjtnQO68BQsKPZmSij0ik0s+2LrovA45ib40t5I9XUqGOxJSCr29L388LuKSPfV6TD5uCwm9ITyTvg4Qmz08wqU7LJogvUw9uLsZRZs9nIlNPhYQIb2b7p6+zn2jPUnyyj7xfTq9/X8dvyW6sz3HxE4+V+RsvaqprL51/7s9N077O01ChL3TtIG94E+8PQzyPr5m2oa9e0tVPpistD1JHRw8haR8veTU1b2CELU98BY9vh
SZgr3LOiw+O4CtPZ7zwb7kane9XlThPhn8nT1URzu+ZV1Tvc4YBD5efpY9AyXBvgzMSL3sV88+xAqHPQrPOb5Dnye9WCbHPSs4fz2hfsC++Kcfva/jwD6Wa2A976M4vjjLAL35gJM9J6ZRPQ3IeTyPyfW8JPFrvt/lUj3dvTe+38QNvbqvVz3YMkQ9uYK/vo50Cb0+H6s+kY4lPYK+Nr7LJty8DSz/PPjvFj16jos8UAzXvBMvir47VRg9gvc1viuiAb0L6Gs8jcYJPXWlvr41dAC9QQeYPluL1jxwCzW+QkLQvNNvs7vEk7k8xTu+vu8n0bxq444+rmd5PO74EL+GbqO8J0gQP4dcfzu62UK/xi4OvCyVWT97kTm8iN4Qv7tSCDzl9Q0/H4C5vHvlvb4uBJ88YGqHPm9E9rzvRjS+a1nKPHBXtLxLjgm9wje+vhK+xjwBhY4+nf0nvVz+NL5FWfQ8ZnPXu114Nr1j4pI8fkXzPIZTlL5WADW9Y901vpvOwzwJ30c87oxDvaKljDxHzsU8q7KLvt8kQr1xkTa+ORqZPAUS4Dzev1C9dqiHPHeVnTzozIS+lWRPvZMfN77oLGY8gwUhPfMKXr3YkL++oA5zPNc7rD58sXy9zIs3vrSksDxMWkY9QbCFvZHUv77Vk7g8OReyPvAIlb32MDi+BpHxPPRXfz0PZ5y9QjXAvr7H+zyddbo+e8ervW4SOb5AuRs9PZemPZ4us737d2I8JGMiPZriS76tnbK9JTU6vpITEj0stdg9cRC6vcfkUDyovho9750zvsCKub3/Rzu+GmAMPZoLBD6CCMG9H8nBvmTwFj1LT90+lIwFbnVtcHmUjAVkdHlwZZSTlIwCZjSUSwBLAYeUUpQoSwOMATyUTk5OSv////9K/////0sAdJRiS8hLBIaUjAFDlHSUUpQuAAAAAA==", "action_prob": [0.5256412625312805, 0.8248687386512756, 0.45793473720550537, 0.8605185151100159, 0.41744932532310486, 0.8761753439903259, 0.6323373913764954, 0.7565934658050537, 0.6648380160331726, 0.7313669919967651, 0.6943894028663635, 0.7043389678001404, 0.72172611951828, 0.3250804841518402, 0.8962647914886475, 0.6675595641136169, 0.7484984993934631, 0.3536776900291443, 0.8895854353904724, 0.35201022028923035, 0.8918532133102417, 0.6602283716201782, 0.7452659606933594, 0.6582269072532654, 0.2524130642414093, 0.917079508304596, 0.7705786228179932, 0.6142973899841309, 0.7849718332290649, 0.5895155668258667, 0.8005152344703674, 0.43997082114219666, 0.8593524694442749, 0.5531554222106934, 0.182420551776886, 0.9353232979774475, 0.8434819579124451, 0.45985931158065796, 0.8621677160263062, 0.5924195647239685, 0.777825653553009, 0.37582361698150635, 0.8886260986328125, 0.6741248965263367, 0.28747785091400146, 0.9027026295661926, 0.2959340810775757, 0.9025185704231262, 0.7082251310348511, 0.6970930099487305, 0.7032329440116882, 0.7009216547012329, 0.30063489079475403, 0.9033343195915222, 0.28241822123527527, 0.9098028540611267, 0.7454308271408081, 0.3646588921546936, 0.8789021968841553, 0.36683428287506104, 0.8802663087844849, 0.6412331461906433, 0.7515718936920166, 0.6350235342979431, 0.755964994430542, 0.6265958547592163, 0.7617502808570862, 0.6158391833305359, 0.7688626646995544, 0.6025813221931458, 0.7772388458251953, 0.41340601444244385, 0.8601136207580566, 0.4070168733596802, 0.8646483421325684, 0.6092738509178162, 0.7653318643569946, 0.38926249742507935, 0.8717235326766968, 0.6342201828956604, 0.2572495937347412, 0.9108331799507141, 0.7513124942779541, 0.37811678647994995, 0.8719948530197144, 0.36002182960510254, 0.880423367023468, 0.3321553170681, 0.8915058970451355, 0.7042906284332275, 0.3347679078578949, 0.11618360131978989, 0.05332870036363602, 0.9659713506698608, 0.9502090811729431, 0.9039193987846375, 0.7499552369117737, 0.5826046466827393, 0.7750289440155029, 0.4584871828556061, 0.81419837474823, 0.5211343765258789, 0.8059515357017517, 0.4875529408454895, 0.8215456604957581, 0.45166710019111633, 0.8367767930030823, 0.4134962558746338, 0.8516069650650024, 0.6266768574714661, 0.687355101108551, 0.6514890193939209, 0.334202378988266, 0.14039455354213715, 0.9345529675483704, 0.8740472197532654, 0.36252230405807495, 0.896816074848175, 0.6860520839691162, 0.7085597515106201, 0.7153002619743347, 0.6816492080688477, 0.7390608787536621, 0.34369027614593506, 0.8874228000640869, 0.658348560333252, 0.752244770526886, 0.6502488255500793, 0.7577680349349976, 
0.6442596316337585, 0.7616212964057922, 0.3597574830055237, 0.887228786945343, 0.663818895816803, 0.25934192538261414, 0.9174041748046875, 0.74948650598526, 0.33978766202926636, 0.8942300081253052, 0.6890450119972229, 0.2840145230293274, 0.9105319380760193, 0.719329297542572, 0.3007564842700958, 0.9059088230133057, 0.2674787640571594, 0.916103720664978, 0.7743216156959534, 0.40157946944236755, 0.8711622953414917, 0.5707144737243652, 0.8170074820518494, 0.5215039849281311, 0.8404732346534729, 0.4640161395072937, 0.13695785403251648, 0.9464011192321777, 0.8910295367240906, 0.6980783939361572, 0.6736809611320496, 0.7521014213562012, 0.3941214680671692, 0.8625127077102661, 0.4404342472553253, 0.8474799394607544, 0.47648704051971436, 0.8352437615394592, 0.49761420488357544, 0.8413048982620239, 0.5463384985923767, 0.8017957210540771, 0.43185946345329285, 0.8646782040596008, 0.6081406474113464, 0.7633503675460815, 0.6285426020622253, 0.2485833466053009, 0.0842682495713234, 0.9590526223182678, 0.9267340898513794, 0.808161735534668, 0.5388147830963135, 0.8305384516716003, 0.5077704787254333, 0.825065553188324, 0.5343879461288452, 0.8146411776542664, 0.5551165342330933, 0.8061191439628601, 0.4295116066932678, 0.8729360103607178, 0.3912937045097351, 0.886841893196106, 0.3450402021408081, 0.9012870788574219, 0.70702064037323, 0.6860236525535583, 0.7417863011360168, 0.6472751498222351, 0.22767147421836853], "advantages": [2.3915624618530273, 0.9128128290176392, 1.9939098358154297, 4.373640060424805, 1.7288860082626343, 4.224225997924805, 1.5726784467697144, -0.17270918190479279, 1.3748666048049927, -0.490682989358902, 1.202043056488037, -0.7665486931800842, 1.056379795074463, -0.9943071603775024, -1.921170711517334, -1.345302700996399, 0.5937110781669617, -1.5748848915100098, -2.789512872695923, -1.9502424001693726, -3.2849485874176025, -2.400507926940918, -0.5713468194007874, -2.7596092224121094, -0.9667003154754639, 1.0491036176681519, -1.144873857498169, -3.192415475845337, -1.4253991842269897, -3.391909122467041, -1.6913598775863647, -3.5597715377807617, -5.203500747680664, -3.922785520553589, -2.4654603004455566, -0.8183251023292542, -2.5049383640289307, -4.026360034942627, -2.6796934604644775, -4.064598560333252, -5.498188018798828, -4.320070266723633, -3.22491455078125, -4.361584663391113, -5.5552167892456055, -6.870035648345947, -5.913549423217773, -7.11842679977417, -6.396326541900635, -5.94174337387085, -6.774332046508789, -6.455793857574463, -7.192698001861572, -8.07259464263916, -7.874381065368652, -8.623242378234863, -8.654796600341797, -8.83682918548584, -8.763457298278809, -9.346219062805176, -9.320013999938965, -9.839715957641602, -10.233786582946777, -10.577777862548828, -10.916341781616211, -11.346333503723145, -11.63433837890625, -12.14673137664795, -12.388915061950684, -12.980792999267578, -13.181760787963867, -13.850817680358887, -14.28651237487793, -14.506301879882812, -14.988292694091797, -15.159668922424316, -15.276196479797363, -16.052671432495117, -16.63519287109375, -16.730117797851562, -16.782697677612305, -17.1455135345459, -17.86905860900879, -18.816301345825195, -19.566633224487305, -19.58060646057129, -20.33991050720215, -20.35165023803711, -21.093889236450195, -21.123939514160156, -21.0780029296875, -21.398609161376953, -22.184791564941406, -23.385377883911133, -23.519378662109375, -24.169841766357422, -25.254911422729492, -26.55855941772461, -26.465360641479492, -27.809185028076172, -28.995939254760742, -28.951562881469727, -28.935897827148438, -30.32781982421875, 
-30.33701515197754, -31.768753051757812, -31.799833297729492, -33.27592086791992, -33.323875427246094, -34.850650787353516, -36.28150939941406, -36.345703125, -37.7971076965332, -38.577781677246094, -37.94478225708008, -39.647216796875, 16.350248336791992, 16.597885131835938, 16.050472259521484, 16.253828048706055, 15.76790714263916, 15.910856246948242, 15.485125541687012, 15.571906089782715, 16.475852966308594, 15.268145561218262, 14.935413360595703, 14.946843147277832, 14.645359992980957, 14.624359130859375, 14.35145378112793, 14.300219535827637, 15.055255889892578, 14.010693550109863, 13.775786399841309, 14.385168075561523, 13.436009407043457, 13.329998016357422, 14.034575462341309, 13.035112380981445, 12.832073211669922, 13.481013298034668, 12.468961715698242, 12.332097053527832, 13.03783893585205, 12.03313159942627, 12.801027297973633, 11.753522872924805, 11.526556968688965, 12.149247169494629, 11.119643211364746, 11.061503410339355, 10.75860595703125, 10.74571704864502, 10.395262718200684, 10.44758415222168, 11.762396812438965, 10.271475791931152, 9.765162467956543, 10.128925323486328, 9.379042625427246, 9.62575912475586, 10.951822280883789, 9.047138214111328, 10.22319221496582, 8.495255470275879, 9.526464462280273, 7.965987205505371, 7.8194427490234375, 7.537895679473877, 8.315401077270508, 7.053107261657715, 7.229898452758789, 6.676485061645508, 7.1954145431518555, 6.253802299499512, 6.625970363616943, 8.238375663757324, 10.621915817260742, 7.401065349578857, 5.3030877113342285, 4.76797342300415, 4.780954837799072, 4.279568672180176, 5.106289386749268, 3.8309929370880127, 4.702719211578369, 3.385199546813965, 4.311069488525391, 2.9430389404296875, 2.835113763809204, 2.4244446754455566, 2.2777273654937744, 1.8650037050247192, 1.7062467336654663, 1.2580653429031372, 2.148122549057007, 0.6627457141876221, 1.50052011013031, 0.04488372802734375]}
+{"type": "SampleBatch", "eps_id": [89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 89905220, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619], "obs": 
"BCJNGGhAFg0AAAAAAABQFg0AgIAFlQsNAAAAAAAAjCByYXkuY2xvdWRwaWNrbGUuY2xvdWRwaWNrbGVfZmFzdJSMEV9udW1weV9mcm9tYnVmZmVylJOUKJaADAAAAAAAAIIIwb0fycG+ZPAWPUtP3T49idC96VE8vj1ZOj13CRs+oxHYvV5hwr5lwEY9F4rqPoue573+sD2+IUdsPcltOT78NO+9TowTPLccez3uLL69jtbuvRATUD5RgXM9Pi67vt+D5r28qu47Zo5VPZJTYL1/N+a9pHVOPtkRUT2HPqm+WvXdvXlnvjua/TU9mwy2vGy43b0cE00+jis0PVzjmb51hNW9sgWVO0yMGz1Bh7k7xVTVvRivQ74JAxw94cKePpMo3b0A22I75mk1PT7i8jxHBN294KtKPq7XNz0NtX6+7OjUvSZlDztGdyM9mZNsPfvR1L3Yb0k+jDIoPaJmY75Ew8y9XjeHOmABFj0Rjao9c7jMvfxNSD7S0xw9nl9KvlS1xL3R0R+4NqMMPTPk2j27tcS9gFRIvqhkFT0gBdI+HLnMvf1riboU/zY9A1UEPhrEzL03DEY+PZVBPSqbGL4Y2MS9aqYau91fNT01+iE+1/DEvSq2RD4pVUI9uTD2vYMSvb1DV3C7LHw4PYCLPz73OL29bVxDPgPPRz0Xi7q9eGi1vTr2o7vPWEA9KsddPvCctb1q9kE+0xZSPWePeb3D2q29HH3EPhMZTT3giqy+rCKevRl7QD68fTE9czDtvKtvlr25z8M+iB4vPRaFnb5zxYa9wTg/PoTqFT1INOy6rj5+vf+bEby5xBU9ArqaPg/5fr3SJj4+U4YuPb49rjzBwm+9DqfCPmJEMD157YO+yZ1QvSjmPD6gKBs9dLVFPSKBQb1MEMI+5RwfPabMbb5JdCK96tUSP8QWDD0bLQO/Bu/mvMqoRD+fOcQ8YUtLv7ckUrxHoBI/uDsEPEjt/L5EsrO6u0rBPpMe7bqVjUu+VH3KOzXOOj7Hjb27fiO/PcUFITzvWME+s2OAu0kATr7nYY48kPI6PnYdArw34Lg9RkusPBtvwT7lEcm7AdNRvmMx6jxPJzs+v60nvP7Jrz2ZEQQ9to3BPm6NC7zlGVe+jQkjPZFtOz6EYlC8UK+jPRQIMj1mtcE+/DE2vDPyXb5iBlE9hNgSP883fbxXTgO//gGAPfrmwT4VpdK8dItmvhyFjz2sYjw+Moj3vIX6cj0uDpc9emcvvBjQ7bx8RK8+652WPaFAPT4uurW8vmYmPd0vnj0ZsMI+OxKvvCeYhL4Tw609z+M9PlyA2by2Ttw8i1u1PX0Jwz5jGNW860+Mvuf1xD22qD4+Yf8AvWMRKTxBlsw9rnTDPvkmAL1KkpW+MDncPdWTPz5rFRi9lkobvPHi4z0W9MM+MdwYvY2VoL4RkPM9japAPriNMr33/Qa9+ET7PZKKxD7hQDW9bpqtvhF/BT4880E+qwdRvdl4eL0XYAk+fjvFPtj/Vb2A7by+wEMRPll1Qz5TOnS9MvO+vYAsFT4C/lq7p917vf5fOD77GhU++TlFPqcdbb1rhwa+xwwZPivqxj7O4He9gSzivqoBIT7c80Y+cgiOvVLJLL5N/CQ+sExsOcfxlL1ub889fP0kPogFST61y5C9SoxavrcCKT505Mg+pImZvUsNB7/aCzE+MiNLPl4lr72xz4S+6xs1PgrYlTtXxbm9zjaoO+QzNT6orUG+g4+5veomiT5CVDE+zFXqO6SWrr0szlS9wXkxPokhP75st7C94sNZPimnLT6nJR08ggGovZr52L1z2S0+7688vmtYrL36miM+XxMqPrndQzwZzaW9uQYivgxSKj5bSTq+QEisvZXj3D1DmCY+UWHAvkzdp71D574+SuYePvnREb+Yl5i9H3wjP+Q7Ez6wP7++g958vUmYpT6AlQs+xtk1vr9fYr3Vf0M8bfIHPk7BlTyCZWG9TmeYvkVSCD5lPTS+9cd5vbQGu7xxtwQ+DTujPL6me73hEKu+6R8FPvtzMr7Ngou9hZN7vTuOAT7FmLy+1gaOvXAIVj7/BfQ9pvoPvyR3hb0TRfU+nfzcPQGeu74HsGO90owqPjf6zT1lqS6+KQtWvRmSEr6t/cY9udS6vu7EYb0OzAc+YQu4PT4YLb7O51a9tS41vuQesT3JC7q+bGZlvaU6yj2rPKI9UYMrvphPXb1tIli+YGCbPTA+ub4Imm69AEODPZqOjD3WWA6/6Vlpvbb5rD4ikGs9t2a4vtasTb0WWeM8Dw9OPWNMKL7TZku93oiPvk6YQD0wxwA9AV5ivW+YFr+mK0M9a8YmvmRHib2cgaC+GdQ1PRnFtr6PHpa9q8wuvdeVGD1vDA2/DN6XvQ9vZz5J5tY8pLU+vyqcjr1sI/0+3rA5PIiGDL/It3S9n/Q4PvwuujnUPj6/5OtlvSJi6D4msm28XRoMv3m9QL3bfhM+jIPQvLbfPb/D8DS9itDXPj4EJb0RxQu//2gSvZ8B7D0vvlG92pU9v0v4CL0v+co+gjSHvVGEC78B/dC81Ea/PR6Hnb1FXz2/qK/BvCF/wT7N07u9SFYLv2vEg7yVf589scz6PHd4tzyX/9U8v/UfPQ94/jxnWF4+lGXcPGMpe76pBRE9FUOxPP81tDxBcGQ9dMsSPfehXT41Wb08fWhrvn2GJD28i9I+466XPMPjA79yNkY9+ScbPyqMBjznTk2/19x3PUpa0j47PwC8FLkBv3HCjD2sA10+bSWTvJG4Xb6hmZU9BNGrPB6ftrw5QpA9jnWWPRGaXT6zFKu8qrFqvsJSnz3d3rA8xKHQvCHGaD0nNaA9KUdePilSx7wXoXm+RxmpPR6ztjz5Qu+8enkoPSIDqj34Dl8+y4XovC5whb5B77I9tXC9PIecCb24Nrw8vOGzPYqdL76zuge9JRicPm/brD27QMU8J4LdvGpS/Drr1609n70uvmkx3bzDaJI+k9qmPb2UyzyRV668E7h3vCjfpz3ICS6+utGwvM+iij4B6aA97JnQPKt0hLwsm+q8BPShPQ99Lb7aJYm8eY6EPn4Dmz2qdNQ8o3U9vOrPH71wE5w9gBMtvpc+SrzT/n8+IyeVPXhB1zzlpvC7vrA+vao6lj2j0WI+yZQHvCffrr5KTZ89JV/VPsN/d7wbByO/JF+wPVAoYz5ZFuS8Mqiyvjx1uT1MJ94896AOvRF1hb2Xkbo9LxFkPpH3E729vLy+/7DDPYdz5jw/KjK9pk2zvfnXxD2hOGU+UFY5vU6Kyb4zA849ZdLwPGeVWb38puy9dDfPPRfiKL64DGO9Lr8jPhZ2yD1NgP08MPNVvbFTGb6Susk9kTdoPlI3Yr3otuq+eQTTPXgEBT2d4oO98w48vv9Y1D168Gk+VWiLvffq/b6JtN09/LkMPZG4n73Y3Wa+zBzfPbUIbD6k9Ki90KsKv8mN6D3nCRY9nSS/vQ5cjb7iDeo9R9Agvqhzyr1fYWm8J5/jPXiEs74FCcu9CpV7PqJC1T0L7x2+0vjAvXnFnL1l8c49Gh6yvn
0bxL0MbD0+i7HAPZ8rG77Oh7y9xZMLvpp8uj1KwbC+FB3CvWv/AD6oWKw9YvMJvyT0vL0RTsY+MkaWPayJO7/bFq29U0gmP0yJcD0VUAm/7HuSvZe4qT6hmEQ9wT2uvgnohL06Wow8tbcoPfzJE75jNIS9aDGXvv3kHD37S62+0UyQvTDEwbzBKgE941IIv9dEkb2+5Xs+OBarPI//Ob9pMYe97fYDPwsx0Dtk0we/SChkvayeTz64hYu7Yl2rvjqMU724Edu9Q28zvE1vB79/T1y9MAotPkRlsLxLmKq+pHdOvb2ID75m/Oa8Dw4HvzjzWb1rdgs+5rUevRXNOL8Hy069ssDSPs7YWb0jrQa/mBItvenk0z2+eIK9WXc4v8yYJL3y2cM+e/yfvf9fBr+6QgW9XJuePXp8tb36Mzi/MNX9vAkpuD5v9dK9hCQGv8bmwrwrGms962vovScBOL9Uf7m8nFmvPl/uAr4E+QW/p2KBvFwULz0ipg2+yu2nvq3DdLygf4K+uV0UvjncBb9HJKS8UWMHPS4TH76FrKe+6LmevNUhiL4pyCW+TrcFv9pJyrwY+Kg8qnowvgyTN7+76Ma8mV+cPkUqP75Xb2m/n96UvM8gFz8E11G+eGs3vyGX0LsXgJU+dINgvv5Yab/X1gm6TCkVP2ouc74AZTe/wE82PFpilD4r7YC+AHwFv4KjijzLBy06DESGviZ/N78xv4o8leaYPg6bjb65mwW/0qy7PO/NOTw085K+RKQ3v3qIvTy8UJ8+sUuavtnGBb+Yg/A8auPTPJGln74h1Te/dsD0PLXDpz4DAKe+UP4Fv903FT3CezY9lIwFbnVtcHmUjAVkdHlwZZSTlIwCZjSUSwBLAYeUUpQoSwOMATyUTk5OSv////9K/////0sAdJRiS8hLBIaUjAFDlHSUUpQuAAAAAA==", "actions": [1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1], "rewards": [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0], "prev_actions": [0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1], "prev_rewards": [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0], "dones": [false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, true, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false], "new_obs": 
"BCJNGGhAFg0AAAAAAABQFg0AgIAFlQsNAAAAAAAAjCByYXkuY2xvdWRwaWNrbGUuY2xvdWRwaWNrbGVfZmFzdJSMEV9udW1weV9mcm9tYnVmZmVylJOUKJaADAAAAAAAAD2J0L3pUTy+PVk6PXcJGz6jEdi9XmHCvmXARj0Xiuo+i57nvf6wPb4hR2w9yW05Pvw0771OjBM8txx7Pe4svr2O1u69EBNQPlGBcz0+Lru+34Pmvbyq7jtmjlU9klNgvX835r2kdU4+2RFRPYc+qb5a9d29eWe+O5r9NT2bDLa8bLjdvRwTTT6OKzQ9XOOZvnWE1b2yBZU7TIwbPUGHuTvFVNW9GK9DvgkDHD3hwp4+kyjdvQDbYjvmaTU9PuLyPEcE3b3gq0o+rtc3PQ21fr7s6NS9JmUPO0Z3Iz2Zk2w9+9HUvdhvST6MMig9omZjvkTDzL1eN4c6YAEWPRGNqj1zuMy9/E1IPtLTHD2eX0q+VLXEvdHRH7g2oww9M+TaPbu1xL2AVEi+qGQVPSAF0j4cucy9/WuJuhT/Nj0DVQQ+GsTMvTcMRj49lUE9KpsYvhjYxL1qphq73V81PTX6IT7X8MS9KrZEPilVQj25MPa9gxK9vUNXcLssfDg9gIs/Pvc4vb1tXEM+A89HPReLur14aLW9Ovaju89YQD0qx10+8Jy1vWr2QT7TFlI9Z495vcParb0cfcQ+ExlNPeCKrL6sIp69GXtAPrx9MT1zMO28q2+WvbnPwz6IHi89FoWdvnPFhr3BOD8+hOoVPUg07LquPn69/5sRvLnEFT0Cupo+D/l+vdImPj5Thi49vj2uPMHCb70Op8I+YkQwPXntg77JnVC9KOY8PqAoGz10tUU9IoFBvUwQwj7lHB89psxtvkl0Ir3q1RI/xBYMPRstA78G7+a8yqhEP585xDxhS0u/tyRSvEegEj+4OwQ8SO38vkSys7q7SsE+kx7tupWNS75Ufco7Nc46PseNvbt+I789xQUhPO9YwT6zY4C7SQBOvudhjjyQ8jo+dh0CvDfguD1GS6w8G2/BPuURybsB01G+YzHqPE8nOz6/rSe8/smvPZkRBD22jcE+bo0LvOUZV76NCSM9kW07PoRiULxQr6M9FAgyPWa1wT78MTa8M/JdvmIGUT2E2BI/zzd9vFdOA7/+AYA9+ubBPhWl0rx0i2a+HIWPPaxiPD4yiPe8hfpyPS4Olz16Zy+8GNDtvHxErz7rnZY9oUA9Pi66tby+ZiY93S+ePRmwwj47Eq+8J5iEvhPDrT3P4z0+XIDZvLZO3DyLW7U9fQnDPmMY1bzrT4y+5/XEPbaoPj5h/wC9YxEpPEGWzD2udMM++SYAvUqSlb4wOdw91ZM/PmsVGL2WShu88eLjPRb0wz4x3Bi9jZWgvhGQ8z2NqkA+uI0yvff9Br34RPs9korEPuFANb1umq2+EX8FPjzzQT6rB1G92Xh4vRdgCT5+O8U+2P9VvYDtvL7AQxE+WXVDPlM6dL0y8769gCwVPgL+Wrun3Xu9/l84PvsaFT75OUU+px1tvWuHBr7HDBk+K+rGPs7gd72BLOK+qgEhPtzzRj5yCI69Usksvk38JD6wTGw5x/GUvW5vzz18/SQ+iAVJPrXLkL1KjFq+twIpPnTkyD6kiZm9Sw0Hv9oLMT4yI0s+XiWvvbHPhL7rGzU+CtiVO1fFub3ONqg75DM1PqitQb6Dj7m96iaJPkJUMT7MVeo7pJauvSzOVL3BeTE+iSE/vmy3sL3iw1k+KactPqclHTyCAai9mvnYvXPZLT7vrzy+a1isvfqaIz5fEyo+ud1DPBnNpb25BiK+DFIqPltJOr5ASKy9lePcPUOYJj5RYcC+TN2nvUPnvj5K5h4++dERv5iXmL0ffCM/5DsTPrA/v76D3ny9SZilPoCVCz7G2TW+v19ivdV/Qzxt8gc+TsGVPIJlYb1OZ5i+RVIIPmU9NL71x3m9tAa7vHG3BD4NO6M8vqZ7veEQq77pHwU++3Myvs2Ci72Fk3u9O44BPsWYvL7WBo69cAhWPv8F9D2m+g+/JHeFvRNF9T6d/Nw9AZ67vgewY73SjCo+N/rNPWWpLr4pC1a9GZISvq39xj251Lq+7sRhvQ7MBz5hC7g9Phgtvs7nVr21LjW+5B6xPckLur5sZmW9pTrKPas8oj1Rgyu+mE9dvW0iWL5gYJs9MD65vgiabr0AQ4M9mo6MPdZYDr/pWWm9tvmsPiKQaz23Zri+1qxNvRZZ4zwPD049Y0wovtNmS73eiI++TphAPTDHAD0BXmK9b5gWv6YrQz1rxia+ZEeJvZyBoL4Z1DU9GcW2vo8elr2rzC6915UYPW8MDb8M3pe9D29nPknm1jyktT6/KpyOvWwj/T7esDk8iIYMv8i3dL2f9Dg+/C66OdQ+Pr/k62W9ImLoPiaybbxdGgy/eb1Avdt+Ez6Mg9C8tt89v8PwNL2K0Nc+PgQlvRHFC7//aBK9nwHsPS++Ub3alT2/S/gIvS/5yj6CNIe9UYQLvwH90LzURr89HoedvUVfPb+or8G8IX/BPs3Tu71IVgu/a8SDvJV/nz0MH9K9cjo9v8gDbryeG7s+D3j+PGdYXj6UZdw8Yyl7vqkFET0VQ7E8/zW0PEFwZD10yxI996FdPjVZvTx9aGu+fYYkPbyL0j7jrpc8w+MDv3I2Rj35Jxs/KowGPOdOTb/X3Hc9SlrSPjs/ALwUuQG/ccKMPawDXT5tJZO8kbhdvqGZlT0E0as8Hp+2vDlCkD2OdZY9EZpdPrMUq7yqsWq+wlKfPd3esDzEodC8IcZoPSc1oD0pR14+KVLHvBeheb5HGak9HrO2PPlC77x6eSg9IgOqPfgOXz7Lhei8LnCFvkHvsj21cL08h5wJvbg2vDy84bM9ip0vvrO6B70lGJw+b9usPbtAxTwngt28alL8OuvXrT2fvS6+aTHdvMNokj6T2qY9vZTLPJFXrrwTuHe8KN+nPcgJLr660bC8z6KKPgHpoD3smdA8q3SEvCyb6rwE9KE9D30tvtolibx5joQ+fgObPap01DyjdT286s8fvXATnD2AEy2+lz5KvNP+fz4jJ5U9eEHXPOWm8Lu+sD69qjqWPaPRYj7JlAe8J9+uvkpNnz0lX9U+w393vBsHI78kX7A9UChjPlkW5LwyqLK+PHW5PUwn3jz3oA69EXWFvZeRuj0vEWQ+kfcTvb28vL7/sMM9h3PmPD8qMr2mTbO9+dfEPaE4ZT5QVjm9TorJvjMDzj1l0vA8Z5VZvfym7L10N889F+IovrgMY70uvyM+FnbIPU2A/Tww81W9sVMZvpK6yT2RN2g+Ujdivei26r55BNM9eAQFPZ3ig73zDjy+/1jUPXrwaT5VaIu99+r9vom03T38uQw9kbifvdjdZr7MHN89tQhsPqT0qL3Qqwq/yY3oPecJFj2dJL+9DlyNvuIN6j1H0CC+qHPKvV9habwnn+M9eISzvgUJy70KlXs+okLVPQvvHb7S+MC9ecWcvWXxzj0aHrK+fRvEvQxsPT6LscA9nysbvs
6HvL3Fkwu+mny6PUrBsL4UHcK9a/8APqhYrD1i8wm/JPS8vRFOxj4yRpY9rIk7v9sWrb1TSCY/TIlwPRVQCb/se5K9l7ipPqGYRD3BPa6+CeiEvTpajDy1tyg9/MkTvmM0hL1oMZe+/eQcPftLrb7RTJC9MMTBvMEqAT3jUgi/10SRvb7lez44Fqs8j/85v2kxh73t9gM/CzHQO2TTB79IKGS9rJ5PPriFi7tiXau+OoxTvbgR271DbzO8TW8Hv39PXL0wCi0+RGWwvEuYqr6kd069vYgPvmb85rwPDge/OPNZvWt2Cz7mtR69Fc04vwfLTr2ywNI+zthZvSOtBr+YEi296eTTPb54gr1Zdzi/zJgkvfLZwz57/J+9/18Gv7pCBb1cm549eny1vfozOL8w1f28CSm4Pm/10r2EJAa/xubCvCsaaz3ra+i9JwE4v1R/ubycWa8+X+4CvgT5Bb+nYoG8XBQvPSKmDb7K7ae+rcN0vKB/gr65XRS+OdwFv0ckpLxRYwc9LhMfvoWsp77ouZ681SGIvinIJb5OtwW/2knKvBj4qDyqejC+DJM3v7voxryZX5w+RSo/vldvab+f3pS8zyAXPwTXUb54aze/IZfQuxeAlT50g2C+/lhpv9fWCbpMKRU/ai5zvgBlN7/ATzY8WmKUPivtgL4AfAW/gqOKPMsHLToMRIa+Jn83vzG/ijyV5pg+DpuNvrmbBb/SrLs87805PDTzkr5EpDe/eoi9PLxQnz6xS5q+2cYFv5iD8Dxq49M8kaWfviHVN792wPQ8tcOnPgMAp75Q/gW/3TcVPcJ7Nj0bXKy+fFyovi7eGD2kF3K+lIwFbnVtcHmUjAVkdHlwZZSTlIwCZjSUSwBLAYeUUpQoSwOMATyUTk5OSv////9K/////0sAdJRiS8hLBIaUjAFDlHSUUpQuAAAAAA==", "action_prob": [0.9294587969779968, 0.18570725619792938, 0.9389818906784058, 0.8537620306015015, 0.5756825804710388, 0.7748913764953613, 0.6283503770828247, 0.7425888776779175, 0.6703974604606628, 0.7113828659057617, 0.296244740486145, 0.9111892580986023, 0.7489678859710693, 0.6216521859169006, 0.7777647376060486, 0.5808979868888855, 0.8015543222427368, 0.5408425331115723, 0.17833690345287323, 0.9369442462921143, 0.8489972949028015, 0.4280758202075958, 0.8676512837409973, 0.376761257648468, 0.8837069869041443, 0.327106237411499, 0.8976956605911255, 0.7202097177505493, 0.6347616910934448, 0.748841404914856, 0.6020268201828003, 0.2292359620332718, 0.921622097492218, 0.8006035685539246, 0.5160488486289978, 0.8193137049674988, 0.5181660056114197, 0.20087255537509918, 0.9219332337379456, 0.818394660949707, 0.5186336636543274, 0.8136024475097656, 0.5318852663040161, 0.8075593113899231, 0.5480309724807739, 0.7995930314064026, 0.5672764182090759, 0.7892532348632812, 0.41018688678741455, 0.8700136542320251, 0.6419435739517212, 0.2646118104457855, 0.9107948541641235, 0.7202782034873962, 0.6971978545188904, 0.6888979077339172, 0.7320569157600403, 0.6482886075973511, 0.7683077454566956, 0.5963469743728638, 0.8044509887695312, 0.5315272212028503, 0.8387905359268188, 0.45418357849121094, 0.8697337508201599, 0.631721019744873, 0.7413284182548523, 0.30365633964538574, 0.9114919304847717, 0.7701616287231445, 0.5772626996040344, 0.17918668687343597, 0.9383260607719421, 0.8686422109603882, 0.650863766670227, 0.696381151676178, 0.7289552092552185, 0.610662043094635, 0.7889453172683716, 0.5166381597518921, 0.8340479135513306, 0.5786466598510742, 0.254859060049057, 0.9007974863052368, 0.714186429977417, 0.3216126561164856, 0.894332230091095, 0.26670607924461365, 0.9086517095565796, 0.78337162733078, 0.4880681037902832, 0.8002294301986694, 0.4696184992790222, 0.8347337245941162, 0.4158012866973877, 0.8555151224136353, 0.36345183849334717, 0.8735378980636597, 0.6867836713790894, 0.641447126865387, 0.28130972385406494, 0.10146287083625793, 0.9494880437850952, 0.9127708673477173, 0.8083004951477051, 0.5610784888267517, 0.7359136939048767, 0.5986239910125732, 0.7132056951522827, 0.6249274015426636, 0.6977112293243408, 0.6417592167854309, 0.6897695064544678, 0.6503246426582336, 0.689414381980896, 0.6512210965156555, 0.7156968116760254, 0.6863346099853516, 0.7332778573036194, 0.33137035369873047, 0.10728571563959122, 0.9510492086410522, 0.9048927426338196, 0.7296422719955444, 0.6683666706085205, 0.7536039352416992, 0.6355584859848022, 0.7785553932189941, 0.5956146121025085, 
0.8039855360984802, 0.4525139629840851, 0.8529237508773804, 0.4803846776485443, 0.8434273600578308, 0.5018799304962158, 0.8358321785926819, 0.5174911618232727, 0.8303287029266357, 0.5276968479156494, 0.8270243406295776, 0.4671238362789154, 0.14471691846847534, 0.9429692029953003, 0.8756961822509766, 0.3665030598640442, 0.8914253115653992, 0.3101690411567688, 0.9065641164779663, 0.7475004196166992, 0.6181640028953552, 0.21252992749214172, 0.9287680387496948, 0.16737596690654755, 0.9386832118034363, 0.12912197411060333, 0.9469860792160034, 0.9007706046104431, 0.7613030076026917, 0.5397393107414246, 0.8153049945831299, 0.4414641261100769, 0.8543521761894226, 0.6489368677139282, 0.3345296382904053, 0.8640318512916565, 0.6234999299049377, 0.2589537501335144, 0.9074349999427795, 0.7864785194396973, 0.5088066458702087, 0.7795531749725342, 0.4558618664741516, 0.8324680924415588, 0.41077080368995667, 0.8511573672294617, 0.6324989795684814, 0.6925164461135864, 0.655647337436676, 0.6772750616073608, 0.6708199977874756, 0.6688694357872009, 0.6791495680809021, 0.6672120094299316, 0.3187441825866699, 0.8853155970573425, 0.30418798327445984, 0.8911730051040649, 0.7137296199798584, 0.3683795928955078, 0.8657789826393127, 0.34493616223335266, 0.8797029256820679, 0.6938333511352539, 0.6563680171966553, 0.7246184945106506, 0.6215493679046631, 0.7580873370170593, 0.5766127109527588, 0.7927441000938416, 0.47962790727615356], "advantages": [0.22605036199092865, -0.42867884039878845, -0.39026838541030884, -1.1715447902679443, -0.6946521401405334, 0.8302022814750671, -1.5535041093826294, -0.13991041481494904, -2.4016759395599365, -1.101873755455017, -3.2444748878479004, -4.072220325469971, -4.134042263031006, -3.1309421062469482, -5.014195919036865, -4.158489227294922, -5.894867897033691, -5.186922550201416, -6.77764892578125, -7.112428188323975, -7.688177585601807, -7.322371482849121, -8.586888313293457, -8.391387939453125, -9.478861808776855, -9.448920249938965, -10.360526084899902, -10.490240097045898, -9.832526206970215, -11.496274948120117, -11.01651668548584, -12.474371910095215, -12.940675735473633, -13.439154624938965, -13.287638664245605, -14.382938385009766, -14.363130569458008, -13.511707305908203, -11.212190628051758, -14.58983325958252, -16.341259002685547, -17.146753311157227, -17.31726837158203, -18.075721740722656, -18.288162231445312, -19.007139205932617, -19.25596046447754, -19.941680908203125, -20.222368240356445, -19.703187942504883, -21.154199600219727, -21.80600929260254, -21.722909927368164, -22.79515266418457, -23.13188934326172, -23.75691032409668, -24.111248016357422, -24.729291915893555, -25.097023010253906, -25.714881896972656, -26.090248107910156, -26.716588973999023, -27.091075897216797, -27.737407684326172, -28.097753524780273, -28.77964210510254, -28.561779022216797, -29.934280395507812, -30.217309951782227, -31.02098846435547, -30.877267837524414, -32.234561920166016, -32.31761932373047, -33.347625732421875, -33.36613082885742, -32.347923278808594, -34.885929107666016, -34.01177978515625, -36.41779708862305, -35.70397186279297, -37.95089340209961, -37.414669036865234, -35.95866012573242, -34.20427703857422, -38.15470504760742, -41.30111312866211, -42.83456802368164, -42.96514129638672, -44.246761322021484, -44.590572357177734, -43.81086349487305, -42.177825927734375, -45.9426383972168, -48.067527770996094, -47.82322311401367, -49.5897216796875, -49.615081787109375, -50.965999603271484, -51.28434753417969, -50.43016815185547, -52.96976089477539, -53.268959045410156, 
-51.18247985839844, -53.687198638916016, -55.06084442138672, -55.323970794677734, -54.65584182739258, -56.81843948364258, -56.601871490478516, -58.014732360839844, -58.30796432495117, -58.95310592651367, -59.76710891723633, -59.71620178222656, -61.001075744628906, -60.4132080078125, 8.784515380859375, 8.630024909973145, 8.396060943603516, 8.196632385253906, 8.733899116516113, 10.577536582946777, 8.395715713500977, 7.446453094482422, 7.281351566314697, 7.0236921310424805, 6.848047256469727, 6.596985340118408, 6.404021739959717, 6.167863845825195, 5.949280738830566, 6.610771179199219, 5.4270195960998535, 6.049040794372559, 4.896714210510254, 5.482761383056641, 4.3576979637146, 4.911358833312988, 3.809469223022461, 4.334542274475098, 3.2517106533050537, 3.1204702854156494, 4.0510640144348145, 2.69480299949646, 2.2688372135162354, 2.237015724182129, 1.7450361251831055, 1.8114523887634277, 1.229501485824585, 1.5346274375915527, 0.6508389115333557, 1.0282247066497803, 0.1845501959323883, 0.8386350870132446, -0.1987219899892807, 0.8794696927070618, -0.4212284982204437, -0.6618558764457703, 0.18327677249908447, -1.2055025100708008, -0.6214536428451538, -1.562678575515747, -1.2707750797271729, 0.09874255955219269, 2.097134828567505, -1.05623459815979, -2.5054452419281006, -1.6038429737091064, -2.4810168743133545, -2.222179412841797, -0.9870624542236328, -2.698495626449585, -2.021012783050537, -2.715372323989868, -1.2873162031173706, -2.4002737998962402, -2.547067165374756, -1.9779797792434692, -2.6178901195526123, -1.4094657897949219, -2.4450089931488037, -0.8139810562133789, -2.099644899368286, -0.2879801094532013, 1.6387158632278442, 0.3563217520713806, 2.0343728065490723, 0.9628010392189026, -0.28174227476119995, -1.439690113067627, -0.020464925095438957, -1.1354455947875977, 0.03881257772445679, 0.8591410517692566, 0.10383941978216171, 0.6821367740631104, 0.06252273917198181, 0.4220903217792511, -0.07779884338378906, 0.08335113525390625]}
+{"type": "SampleBatch", "eps_id": [219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 219733619, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551], "obs": 
"BCJNGGhAFg0AAAAAAABQFg0AgIAFlQsNAAAAAAAAjCByYXkuY2xvdWRwaWNrbGUuY2xvdWRwaWNrbGVfZmFzdJSMEV9udW1weV9mcm9tYnVmZmVylJOUKJaADAAAAAAAABtcrL58XKi+Lt4YPaQXcr4duq++XEMGvyGABT1i2oo9+Bi1vuHeqL78DQs9LJJbvpZ5uL6ogQa/V/ryPILNtT3v2r2+MJA4v9TCAD3eBMg+3DzFvtK6Br+cwyA9aEvdPX6gyr610Ti/qp0pPfph0z4KBdK+Cupqv+ZvSz2j4jc/kGrbvsolOb/kI4M9zyDiPnnS4r5QcQe//zqVPenmLT5oPei+ro2rvsEvnD07xcq9wqvrvj/8B7+RIZg9lO1dPkAc8b7qLzq/HQKhPawpCD/Ojvi+TooIv1fLtj2yqoc++gT+vnHirb7LpcE9nPThOqK/AL9OeRW+37fBPWn2hL72fgG/0EKvvswUtz3A7no9oT8Dv/jgCb8wl7k9tfLCPpEBBr/ekbC+uS/JPbWY8T2WxQe/AOgavrIEzj1elxG+3osIv5cDsr7YMcg9yMw4PpVTCr/ZyB2+MZbPPSKHo72MHQu/kpMhPe5QzD2Hha2+2OkKv3K1IL41b749kzuHvI23C78OdBY9HMK9PQSTjr5nhwu/UWkjvjBasj11zCs9klgMv00wtr7+EbQ9b7i4PvkqDr8k9SW+EtnCPV7Lxj1m/w6/W1ABPeXSxj1aFii+BdYOv5nAKL6vGcA9e0shPgauD78Sd+w8V43GPe5U1b0wiA+/M9BjPhVJwj1dwbq+lmQOv9AO1T5VWLM9BkcgvylDDL+YLWE+X7OZPZtDnb7uIgu/R+vCPJoejT0zqgY8vgMLv5QXXz7JdI09jx6Gvi/mCb9uzrI8BrqCPS27Uz2UyQm/ayldPg7YhD39eGG+fa4Iv7n60T5upnc9aMP7vvGUBr8aVls+NF5PPZ7zOL4xfAW/gi7RPmWSQD3z7+m+sGQDv0fpWT5TJBs9P2EZvsNNAr+JlIw8Gt8OPRlQHj5ENwK/Q9lYPlqJGz2s6wG+syEBv7sJ0D6UJBE9+o7Qvj8a/r6Fxlc+C4zfPMZg1L3d8fu+wEx5PIqOzjxyNEo++cn7vl+VOL7a6O48LPn+PoKi/b77ZcC+IkAgPbi6TD/LvQC/KZA5vpDDYT2PBAU/UasBv015VjwxKoY9eKh6PiiaAb8YMVQ+7jCQPT9m2byNigC/dXLNPqkajz3hp5e+N/n8viNnGD+/+II9BekQv5zg9r42HEo/c5JXPUaCVr8Ay+6+SfwXP+LtEj10dwe/q7bovt/Ryz7qKM88Kc9mvhyj5L5jeU8++DqqPBfpmT36j+K+4HvLPg+LtjwN91e+JH7evhLYTj4h/ZM8O7W1PZ5s3L6FMMs+goaiPCD2Sr5KXNi+0HgXPzUNgjy2GPi+N03Svrbtyj6PpMo75Gc/vjk+zr6IZBc/QUkgO/GN9L72L8i+x1VJP/ri6LunAUW/SiLAvrloFz9ITri8/E71vtwTur6ScEk/+2YDvdpfR78eBbK+55oXP7szQ71sJv6+rvSrvkK4ST+43Wu9R79NvxLjo752+xc/RNqWvaOYB7/Gzp2+S6HMPkmMrL0krIW+EbeZvvGLGD/mPbe9AUAUv/2ck74o480+OPbOvZW8ob7Zfo++z5RVPpXm271MSWi9FFyNvs8deDw9Od69pxBNPmE0jb5Yt1g+XwXWvfUm/72WCYu+TLqUPMAf270slgg+/tmKvg7JWz4aqdW9CaFDvliniL5KPa08WHzdvf4yiT3ob4i+3mcwvuK92r3fuqU+gTOKvvc3xjy8e829Ml0wuhP0ib4gay2+yoLNvSN6hD4HsIu+EJHdPKjpwr3Zt4K9IGmLvrjBZD7vhsW95RjFvoIfib6O1vM8fUvVvT9O/r170Yi+z6Ynvohh2r3bIgk+q36Kvrvetr5C5dS9IUXIPvYmjr4KmCS+ut/EvaJrij1SzI++hukRPQQbwr1Y84G+8G6PvsXRIb5ngMy9fwT3OzINkb4ELx09WzHMvTw7ob6ZqJC+rOsevmAX2b0vU2K9bz+Svl2Csr7EWtu9hgtPPmfRlb55xgq/oBLTvTUk6z52Xpu+Rfywvu5CwL0uNws+oOievintGL5dsbq9hD49vh5woL4zpK++OUPCvV5Mnz1n86O+s2QJv54Tv737na0+T3KpvrlFrr7wL7G9AaSXPJburL7elRO+1m2wvWqvmb5naK6+DgStvk+5vL3p1BK9P96xvpAXCL8zMb691NRnPtRPt76brDm/P+u0vfQP+j4ivb6+K28Hv/jpoL2xTC0++ifEvhpwqr5i+5m9WLEWvp+Qx74F4Aa/egKgvZST9z2+9cy+WVKpvuIOm71YE0i+q1jQvqlQBr+pD6O9BHOUPQ641b44Mai+mRegvUYUer4zFdm+1YcHvmkYqr3HfA+/KHDavhIGp76nDcG9ihaXvlHH3b7FFgW/8CPNveR7Cb0mGuO+fqM2v+WDzr3vVGc+XmjqvrhbBL8PQ8W9nz/Gvbiz77687DW/FzrJvX3xJz6g+va+sqYDv1qCwr2uxiG++gM7PDzhV7xA8mo8JkeivKyyNjxNGTo+hnRkPHepnb7kP3I8kIFevGAa/zvmWjK8qMxtPHXIOT4F+Pc7GSqavgGglDxnGmK8qY3KOm0AxrsuXZI8gPVVvpa2ujpjApM+1kJgPG/kzr6p2eo74osUPzOztztWFVa+QsiZPElmlD4iv7o6HBTPvidFyTyDoBY/uF/au5W2Vr7x1RQ9CWqbPirlMbyog8++tbMtPZGCGz8nWpu8NdtXvhZ3Xz0jKKg+oOO9vIQbh7zKXno9ueNaPWCXwLymiFm+gb9+PXHCuj6PZeO8wYCVvJZQjj3eE709BGPmvMEHND6qGJI9ZOo1vvqUybwYXL0+2dGKPeHd5L6g/Iy8zAAyPlEFcT2a9Qi+PANhvMJyvD5jEGY9CYrQvr3P0Lvf9Q8/n7JEPfyHLr8sup87xLG7PgfZDD2Mq7++7/xHPE6rDz9xXNw8r+8nvznxvzziP7s+h8NhPLm+tb613Ps8E38uPtvk2jv1dm29CuQLPe0Zuz5N5rQ7LXSyvrXTKT1VUS4+yRW+uhmxXb28xTc9E7rLvOL7JbvD0HM+Mbw1PUBgLj6SGRI70tVivWqvQz3zErs+dAaTOvfYsb73nWE9rVMuPnbjvru+gF69LpBvPb8buz4ufeK7ypqyvhLAhj1kgy4+LY1jvNP3br0Wu409663IvD2rdrz7cWs+N7qMPdnvLj6RUyu89yuKvZK5kz1lcrs+FW9BvIwTur57uKI9PkMvPupCnLwHkJi9LLupPcyruz5md6i8Swu/vq2+uD2w1y8+u5nlvHsvsr1Nx789Bwa8Pvja87zy28a+BtLOPQKwMD7Cvhm9MIvXvU7j1T1GX7S87F0ivTSSMz5t/NQ9ib9dvk8AFL2hGuk+uh3MPZeEq7y8aN281hAbPi9Cyz0kwDI+PpnEvOw+Gb6XaNI9CnO9Pj
Ae3byQTea+g5DhPcl+Mz5OaBO9Gbwpvou+6D10570+efwgvb9r8L7I7/c9A5g0Phx0R72QFEK+Dyn/Pfi2k7zg+la9b/+yPfxr/j3OdVm+8NFPvdjkuT4xufU9paWHvLkTMr3OmWA9kQv1PYAYWL7ElS29H7+qPr9m7D0fNHu8/UMSvS9S4zz5xes9a/ZWvgz+D71YL54+wizjPfCcaryVXe28s8+wO5uW4j3aB1a+RHvsvEzdkz7uBto9MRFdvDUqvby7gFK8c3nZPcJROj4ZRb+8qB2gvlzt4D17MVK8yYHyvLhB4bzWZuA9pn5UvhoD97w064I+5ObXPYYzRLw8Hs28S809vVJp1z2P5js+z7XUvLmRsb5s7d49bgnCPiTEBr08xyW/THPuPe3APD6y0Du9Yhm7viIA9j1MqyW8R8BZvcBMs70blvU92VZRvk7sYL2daEA+eTbtPREmzL7Hh1G9ru/sPoLh3D25yk++3p4rvScdHj64kdQ9knrLvrL4Hr2y/t0+eUrEPdicTr6S5/a8bAQEPsIGvD0x+8q+JMjhvLnq0j60yas90cRNvtVJnrzMuOI9oY6jPXSkyr6RJoy8k2HLPoNYkz1XPE2+KyMWvCEqyz3mIos9sIWpuxhD67sxEkm+p+yKPR3/TL5J+TW80J3APX25gj2R+aC7uCcXvFH3Tr76hYI98bJMvmFiWbw6frM9uKt0PT4nyr5Yqjy8kZHAPoVTVD1L/Ba/zdeCu6dGKj/JAiQ9TwrKviKIGDxUDr4+N68DPZBpTL50FYk8y9mmPbSp5jzObZu7jm6WPPPTUr7B4uU8iaxCPhdmaTzmRfy+lIwFbnVtcHmUjAVkdHlwZZSTlIwCZjSUSwBLAYeUUpQoSwOMATyUTk5OSv////9K/////0sAdJRiS8hLBIaUjAFDlHSUUpQuAAAAAA==", "actions": [0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0], "rewards": [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0], "prev_actions": [1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1], "prev_rewards": [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0], "dones": [false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, true, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false], "new_obs": 
"BCJNGGhAFg0AAAAAAABQFg0AgIAFlQsNAAAAAAAAjCByYXkuY2xvdWRwaWNrbGUuY2xvdWRwaWNrbGVfZmFzdJSMEV9udW1weV9mcm9tYnVmZmVylJOUKJaADAAAAAAAAB26r75cQwa/IYAFPWLaij34GLW+4d6ovvwNCz0sklu+lnm4vqiBBr9X+vI8gs21Pe/avb4wkDi/1MIAPd4EyD7cPMW+0roGv5zDID1oS909fqDKvrXROL+qnSk9+mHTPgoF0r4K6mq/5m9LPaPiNz+Qatu+yiU5v+Qjgz3PIOI+edLivlBxB7//OpU96eYtPmg96L6ujau+wS+cPTvFyr3Cq+u+P/wHv5EhmD2U7V0+QBzxvuovOr8dAqE9rCkIP86O+L5Oigi/V8u2PbKqhz76BP6+ceKtvsulwT2c9OE6or8Av055Fb7ft8E9afaEvvZ+Ab/QQq++zBS3PcDuej2hPwO/+OAJvzCXuT218sI+kQEGv96RsL65L8k9tZjxPZbFB78A6Bq+sgTOPV6XEb7eiwi/lwOyvtgxyD3IzDg+lVMKv9nIHb4xls89IoejvYwdC7+SkyE97lDMPYeFrb7Y6Qq/crUgvjVvvj2TO4e8jbcLvw50Fj0cwr09BJOOvmeHC79RaSO+MFqyPXXMKz2SWAy/TTC2vv4RtD1vuLg++SoOvyT1Jb4S2cI9XsvGPWb/Dr9bUAE95dLGPVoWKL4F1g6/mcAovq8ZwD17SyE+Bq4PvxJ37DxXjcY97lTVvTCID78z0GM+FUnCPV3Bur6WZA6/0A7VPlVYsz0GRyC/KUMMv5gtYT5fs5k9m0Odvu4iC79H68I8mh6NPTOqBjy+Awu/lBdfPsl0jT2PHoa+L+YJv27OsjwGuoI9LbtTPZTJCb9rKV0+DtiEPf14Yb59rgi/ufrRPm6mdz1ow/u+8ZQGvxpWWz40Xk89nvM4vjF8Bb+CLtE+ZZJAPfPv6b6wZAO/R+lZPlMkGz0/YRm+w00Cv4mUjDwa3w49GVAePkQ3Ar9D2Vg+WokbPazrAb6zIQG/uwnQPpQkET36jtC+Pxr+voXGVz4LjN88xmDUvd3x+77ATHk8io7OPHI0Sj75yfu+X5U4vtro7jws+f4+gqL9vvtlwL4iQCA9uLpMP8u9AL8pkDm+kMNhPY8EBT9RqwG/TXlWPDEqhj14qHo+KJoBvxgxVD7uMJA9P2bZvI2KAL91cs0+qRqPPeGnl743+fy+I2cYP7/4gj0F6RC/nOD2vjYcSj9zklc9RoJWvwDL7r5J/Bc/4u0SPXR3B7+rtui+39HLPuoozzwpz2a+HKPkvmN5Tz74Oqo8F+mZPfqP4r7ge8s+D4u2PA33V74kft6+EthOPiH9kzw7tbU9nmzcvoUwyz6ChqI8IPZKvkpc2L7QeBc/NQ2CPLYY+L43TdK+tu3KPo+kyjvkZz++OT7OvohkFz9BSSA78Y30vvYvyL7HVUk/+uLou6cBRb9KIsC+uWgXP0hOuLz8TvW+3BO6vpJwST/7ZgO92l9Hvx4Fsr7nmhc/uzNDvWwm/r6u9Ku+QrhJP7jda71Hv02/EuOjvnb7Fz9E2pa9o5gHv8bOnb5Locw+SYysvSSshb4Rt5m+8YsYP+Y9t70BQBS//ZyTvijjzT449s69lbyhvtl+j77PlFU+lebbvUxJaL0UXI2+zx14PD053r2nEE0+YTSNvli3WD5fBda99Sb/vZYJi75MupQ8wB/bvSyWCD7+2Yq+DslbPhqp1b0JoUO+WKeIvko9rTxYfN29/jKJPehviL7eZzC+4r3avd+6pT6BM4q+9zfGPLx7zb0yXTC6E/SJviBrLb7Kgs29I3qEPgewi74Qkd08qOnCvdm3gr0gaYu+uMFkPu+Gxb3lGMW+gh+Jvo7W8zx9S9W9P07+vXvRiL7Ppie+iGHavdsiCT6rfoq+u962vkLl1L0hRcg+9iaOvgqYJL6638S9omuKPVLMj76G6RE9BBvCvVjzgb7wbo++xdEhvmeAzL1/BPc7Mg2RvgQvHT1bMcy9PDuhvpmokL6s6x6+YBfZvS9TYr1vP5K+XYKyvsRa272GC08+Z9GVvnnGCr+gEtO9NSTrPnZem75F/LC+7kLAvS43Cz6g6J6+Ke0Yvl2xur2EPj2+HnCgvjOkr745Q8K9XkyfPWfzo76zZAm/nhO/vfudrT5Pcqm+uUWuvvAvsb0BpJc8lu6svt6VE77WbbC9aq+Zvmdorr4OBK2+T7m8venUEr0/3rG+kBcIvzMxvr3U1Gc+1E+3vpusOb8/67S99A/6PiK9vr4rbwe/+OmgvbFMLT76J8S+GnCqvmL7mb1YsRa+n5DHvgXgBr96AqC9lJP3Pb71zL5ZUqm+4g6bvVgTSL6rWNC+qVAGv6kPo70Ec5Q9DrjVvjgxqL6ZF6C9RhR6vjMV2b7Vhwe+aRiqvcd8D78ocNq+EganvqcNwb2KFpe+UcfdvsUWBb/wI8295HsJvSYa475+oza/5YPOve9UZz5eaOq+uFsEvw9Dxb2fP8a9uLPvvrzsNb8XOsm9ffEnPqD69r6ypgO/WoLCva7GIb68Pvy+Rzk1v/D6yL3HidM9rLI2PE0ZOj6GdGQ8d6mdvuQ/cjyQgV68YBr/O+ZaMryozG08dcg5PgX49zsZKpq+AaCUPGcaYrypjco6bQDGuy5dkjyA9VW+lra6OmMCkz7WQmA8b+TOvqnZ6jviixQ/M7O3O1YVVr5CyJk8SWaUPiK/ujocFM++J0XJPIOgFj+4X9q7lbZWvvHVFD0Japs+KuUxvKiDz761sy09kYIbPydam7w121e+FndfPSMoqD6g4728hBuHvMpeej2541o9YJfAvKaIWb6Bv349ccK6Po9l47zBgJW8llCOPd4TvT0EY+a8wQc0PqoYkj1k6jW++pTJvBhcvT7Z0Yo94d3kvqD8jLzMADI+UQVxPZr1CL48A2G8wnK8PmMQZj0JitC+vc/Qu9/1Dz+fskQ9/Icuvyy6nzvEsbs+B9kMPYyrv77v/Ec8TqsPP3Fc3Dyv7ye/OfG/POI/uz6Hw2E8ub61vrXc+zwTfy4+2+TaO/V2bb0K5As97Rm7Pk3mtDstdLK+tdMpPVVRLj7JFb66GbFdvbzFNz0Tusu84vslu8PQcz4xvDU9QGAuPpIZEjvS1WK9aq9DPfMSuz50BpM699ixvvedYT2tUy4+duO+u76AXr0ukG89vxu7Pi594rvKmrK+EsCGPWSDLj4tjWO80/duvRa7jT3rrci8Pat2vPtxaz43uow92e8uPpFTK7z3K4q9krmTPWVyuz4Vb0G8jBO6vnu4oj0+Qy8+6kKcvAeQmL0su6k9zKu7PmZ3qLxLC7++rb64PbDXLz67meW8ey+yvU3Hvz0HBrw++NrzvPLbxr4G0s49ArAwPsK+Gb0wi9e9TuPVPUZftLzsXSK9NJIzPm381D2Jv12+TwAUvaEa6T66Hcw9l4SrvLxo3bzWEBs+L0LLPSTAMj4+mcS87D4Zvpdo0j0Kc70+MB7dvJBN5r6DkOE9yX4zPk
5oE70ZvCm+i77oPXTnvT55/CC9v2vwvsjv9z0DmDQ+HHRHvZAUQr4PKf89+LaTvOD6Vr1v/7I9/Gv+Pc51Wb7w0U+92OS5PjG59T2lpYe8uRMyvc6ZYD2RC/U9gBhYvsSVLb0fv6o+v2bsPR80e7z9QxK9L1LjPPnF6z1r9la+DP4PvVgvnj7CLOM98JxqvJVd7byzz7A7m5biPdoHVr5Ee+y8TN2TPu4G2j0xEV28NSq9vLuAUrxzedk9wlE6PhlFv7yoHaC+XO3gPXsxUrzJgfK8uEHhvNZm4D2mflS+GgP3vDTrgj7k5tc9hjNEvDwezbxLzT29UmnXPY/mOz7PtdS8uZGxvmzt3j1uCcI+JMQGvTzHJb9Mc+497cA8PrLQO71iGbu+IgD2PUyrJbxHwFm9wEyzvRuW9T3ZVlG+TuxgvZ1oQD55Nu09ESbMvseHUb2u7+w+guHcPbnKT77eniu9Jx0ePriR1D2Sesu+svgevbL+3T55SsQ92JxOvpLn9rxsBAQ+wga8PTH7yr4kyOG8uerSPrTJqz3RxE2+1UmevMy44j2hjqM9dKTKvpEmjLyTYcs+g1iTPVc8Tb4rIxa8ISrLPeYiiz2wham7GEPruzESSb6n7Io9Hf9Mvkn5NbzQncA9fbmCPZH5oLu4Jxe8UfdOvvqFgj3xsky+YWJZvDp+sz24q3Q9PifKvliqPLyRkcA+hVNUPUv8Fr/N14K7p0YqP8kCJD1PCsq+IogYPFQOvj43rwM9kGlMvnQViTzL2aY9tKnmPM5tm7uObpY889NSvsHi5TyJrEI+F2ZpPOZF/L5NhAI9Z62qu1njjzvJRki+lIwFbnVtcHmUjAVkdHlwZZSTlIwCZjSUSwBLAYeUUpQoSwOMATyUTk5OSv////9K/////0sAdJRiS8hLBIaUjAFDlHSUUpQuAAAAAA==", "action_prob": [0.8218080401420593, 0.5214031338691711, 0.8020656704902649, 0.4393726587295532, 0.8605348467826843, 0.37843596935272217, 0.11659073084592819, 0.9509966373443604, 0.9093955159187317, 0.7750025987625122, 0.5404054522514343, 0.17530225217342377, 0.9381769895553589, 0.8705894351005554, 0.6726978421211243, 0.6467463374137878, 0.2547138035297394, 0.9173524975776672, 0.8113523125648499, 0.43899378180503845, 0.8545355200767517, 0.6564990282058716, 0.6330134272575378, 0.7248623371124268, 0.554751455783844, 0.22242003679275513, 0.916826605796814, 0.8254109025001526, 0.3785548210144043, 0.8578136563301086, 0.694190263748169, 0.44027140736579895, 0.7742911577224731, 0.5131427645683289, 0.7728937268257141, 0.46035873889923096, 0.8010826110839844, 0.5884822010993958, 0.6715691685676575, 0.6175739765167236, 0.6515219211578369, 0.3627377152442932, 0.8444532155990601, 0.6638259887695312, 0.6092725396156311, 0.3237648606300354, 0.13984978199005127, 0.06951000541448593, 0.9559786915779114, 0.9348646998405457, 0.8877235054969788, 0.7829780578613281, 0.5870891809463501, 0.3448795974254608, 0.8308929800987244, 0.6584927439689636, 0.3929350972175598, 0.8162409663200378, 0.38085275888442993, 0.8228502869606018, 0.6297661066055298, 0.6454786062240601, 0.6234458684921265, 0.3373028337955475, 0.85651695728302, 0.29062625765800476, 0.8845455646514893, 0.22906357049942017, 0.9131240844726562, 0.8374058604240417, 0.34419429302215576, 0.8866040110588074, 0.7635060548782349, 0.49919307231903076, 0.779141902923584, 0.6160060167312622, 0.7008429765701294, 0.7241991758346558, 0.40860262513160706, 0.8400617837905884, 0.5190966129302979, 0.7886542081832886, 0.3732517957687378, 0.8963271975517273, 0.7419912219047546, 0.4069167971611023, 0.8473685383796692, 0.4897053837776184, 0.8626546263694763, 0.36847802996635437, 0.90073162317276, 0.7415105700492859, 0.40113651752471924, 0.8482184410095215, 0.5064860582351685, 0.8527261018753052, 0.6009272336959839, 0.740074872970581, 0.3205355107784271, 0.9082337617874146, 0.7596692442893982, 0.4305516481399536, 0.8323642611503601, 0.50350421667099, 0.8401395082473755, 0.42523887753486633, 0.8696585297584534, 0.34987014532089233, 0.10659728199243546, 0.9558961391448975, 0.917158305644989, 0.7981064319610596, 0.4780065417289734, 0.8402979373931885, 0.39332085847854614, 0.8721705675125122, 0.5617526769638062, 0.8077872395515442, 0.5643998980522156, 0.8086965084075928, 0.4406306743621826, 0.13279876112937927, 0.9484320282936096, 0.11702224612236023, 0.9529115557670593, 0.09847976267337799, 0.9577186703681946, 0.9198662638664246, 
0.22036124765872955, 0.9326434135437012, 0.8307737708091736, 0.5276150107383728, 0.7980259656906128, 0.5837188363075256, 0.23010019958019257, 0.9141104817390442, 0.23539629578590393, 0.9159011840820312, 0.7745755910873413, 0.6059170961380005, 0.7809572219848633, 0.40703797340393066, 0.8745629191398621, 0.5971546173095703, 0.7890209555625916, 0.5786330103874207, 0.8034321665763855, 0.450589120388031, 0.8572885394096375, 0.5372458696365356, 0.8267911076545715, 0.4997854232788086, 0.8459077477455139, 0.45025214552879333, 0.8666173219680786, 0.6103419661521912, 0.23132109642028809, 0.9207680821418762, 0.7563594579696655, 0.33477190136909485, 0.8999382853507996, 0.28365153074264526, 0.9139088988304138, 0.7710989713668823, 0.4086507260799408, 0.8630256056785583, 0.4528726041316986, 0.84818035364151, 0.48957473039627075, 0.8347147107124329, 0.5189793109893799, 0.8230935335159302, 0.4582933187484741, 0.8597542643547058, 0.5858425498008728, 0.7847754955291748, 0.3888317942619324, 0.1180666983127594, 0.949183464050293, 0.9009569883346558, 0.7317249774932861, 0.3664522171020508, 0.8732879161834717, 0.39784175157546997, 0.865296483039856, 0.4176570177078247, 0.8613036870956421, 0.4257396161556244, 0.8616520166397095, 0.5778342485427856, 0.7956264019012451, 0.5651050209999084, 0.8032649159431458, 0.45069071650505066, 0.14573746919631958, 0.9439524412155151, 0.8720064759254456, 0.6130011081695557, 0.23096176981925964, 0.9187437295913696], "advantages": [18.04481315612793, 18.122724533081055, 18.112546920776367, 18.27110481262207, 18.169818878173828, 18.1669979095459, 18.140079498291016, 17.90390968322754, 17.8856201171875, 17.503557205200195, 16.959810256958008, 17.313705444335938, 17.5122013092041, 16.872713088989258, 16.0660457611084, 15.264167785644531, 15.776183128356934, 16.2425594329834, 15.203315734863281, 14.150713920593262, 14.728204727172852, 13.546517372131348, 12.5874662399292, 13.054508209228516, 11.968838691711426, 12.472203254699707, 13.175515174865723, 11.600423812866211, 10.209488868713379, 10.836267471313477, 9.275181770324707, 8.282868385314941, 8.299612045288086, 7.5688982009887695, 7.768186092376709, 6.547183990478516, 6.773626327514648, 5.4381022453308105, 5.1327643394470215, 4.48811674118042, 4.123455047607422, 3.48036789894104, 3.7074623107910156, 2.173457384109497, 1.7211731672286987, 1.04789137840271, 1.2491357326507568, 2.0297019481658936, 2.648629665374756, 0.4999796152114868, -2.145616292953491, -4.141756534576416, -4.900667667388916, -4.34340763092041, -2.423415184020996, -5.454281806945801, -7.364708423614502, -8.143362998962402, -8.946764945983887, -9.780247688293457, -10.558634757995605, -9.931825637817383, -11.977564811706543, -11.276748657226562, -9.09858512878418, -12.306340217590332, -9.893783569335938, -13.164565086364746, -10.436964988708496, -13.77761459350586, -16.078516006469727, -14.402709007263184, -16.776628494262695, -18.236873626708984, -19.182212829589844, -19.335824966430664, -20.306180953979492, -20.292177200317383, -21.272266387939453, -22.08904266357422, -22.37391471862793, -23.158414840698242, -23.394296646118164, -23.037290573120117, -24.040708541870117, -24.73094367980957, -25.5306453704834, -25.742321014404297, -25.868749618530273, -26.452571868896484, -26.53635025024414, -27.080211639404297, -27.55298614501953, -28.36653709411621, -28.488910675048828, -28.939193725585938, -29.194520950317383, -29.731197357177734, -30.112783432006836, -30.659534454345703, -30.804677963256836, -31.074373245239258, -31.809494018554688, -32.06523132324219, 
-32.81196212768555, -32.91990280151367, -33.72016525268555, -33.81428909301758, -34.620662689208984, -34.7878532409668, -35.28911209106445, -35.541778564453125, -35.91837692260742, -36.66606521606445, -37.08808898925781, -37.8045539855957, 16.902423858642578, 17.206523895263672, 16.650371551513672, 16.95181655883789, 16.39695930480957, 16.690797805786133, 18.187639236450195, 16.366186141967773, 17.901554107666016, 16.073989868164062, 17.70115089416504, 15.82987117767334, 15.12055778503418, 15.648594856262207, 14.837496757507324, 14.747261047363281, 15.27219009399414, 14.402836799621582, 14.8204345703125, 16.16831398010254, 14.439570426940918, 15.732061386108398, 14.11117172241211, 13.509349822998047, 13.782867431640625, 13.218619346618652, 13.371761322021484, 12.904123306274414, 13.116780281066895, 12.616473197937012, 12.812353134155273, 12.32984733581543, 12.581377983093262, 12.007198333740234, 12.177565574645996, 11.719172477722168, 11.891339302062988, 11.42978572845459, 11.618071556091309, 11.13872241973877, 11.449567794799805, 12.829580307006836, 11.018871307373047, 10.369399070739746, 10.614158630371094, 10.061699867248535, 10.359978675842285, 9.756139755249023, 10.00848388671875, 11.305398941040039, 9.487624168395996, 10.715190887451172, 8.963594436645508, 10.123252868652344, 8.43553638458252, 9.529356956481934, 7.903004169464111, 7.468772888183594, 7.441303253173828, 8.418562889099121, 6.888324737548828, 6.555356979370117, 7.442780494689941, 6.237778663635254, 6.016102313995361, 6.776116371154785, 8.6322603225708, 5.965451717376709, 7.700962066650391, 5.185009479522705, 6.796955108642578, 4.431941986083984, 5.923365592956543, 3.7026560306549072, 2.9259538650512695, 3.0685935020446777, 2.378781318664551, 2.437511444091797, 3.626401662826538, 5.901596546173096, 2.734513998031616, 0.9856656193733215, 0.5314849019050598, 1.1544876098632812]}
+{"type": "SampleBatch", "eps_id": [1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1693691551, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201], "obs": 
"BCJNGGhAFg0AAAAAAABQFg0AgIAFlQsNAAAAAAAAjCByYXkuY2xvdWRwaWNrbGUuY2xvdWRwaWNrbGVfZmFzdJSMEV9udW1weV9mcm9tYnVmZmVylJOUKJaADAAAAAAAAE2EAj1nraq7WeOPO8lGSL4RFwI93DNNvrRg+zkfsck9CVnjPKfzrLtJgSA72bRGvqh74jzTPk2+r627uvuUyz3OpME8voPKvhrQETqGhMg+ztaAPII7Tb7ScQk87QPLPfgAQDyep7G7VO0pPDZ6Q74sOj48x4FNvqW/1juUH9c9F+74Oz+QubtGyw08YgU+vgE49Tu8uk2+gfmhOybw4D2XGmM7Duu/u2706Tscozm+WW1bO/znTb5IS2Y7TLzoPfmHMLrM5cS7Vp+9O8IzNr7nCFC6CZFBPqkGEjtnBPC+qMFDO+6nyLuCNeq72ZszvvG6OztL+E2+TpQuvM+P6z2L0pe6887KvrLjCLxgA88+csYUvAyxTb711Iy5ekTfPbOYVryuP7y7DZP6OgwpPL6eeli8MrdNvrUd57rrU+A9ayeNvE6/yr4dGOA5iKbNPnoIzrwmsk2+iZ4KPKZ23z3I8e68zoTAu5lfLjzgOjm+NejvvI6hQT4eM+Y7bb3wvhTt0Ly0qsQ+GeUbu1S6Qr8i/pG8+I9BPtkckLxa/O++ogtmvDV5v7t76Ny8q/45vs71Z7wEYk2+1Kr6vJLB0T1j15S8CpOkuwbj6bxEjUy+CqqVvJ2ETL67TgW96JarPRljtrxA6sm+UOP8vHBnuz76//a8GJdLvizrwLw8mYI9hMkLvWmpXLuDeLa8jutxviEQDL1y60q+kC3dvGcISj3tSxy9Wawuu78Y1bzN5YC+0oMcveIkSr4LWP68w5QFPbuvLL1Zvci+LAD5vER0oT4Hzky9rj5JvtlVxbyavFg8hedcvQRYyL4AK8O8oK+YPpr1fL0fi0i+9k6SvG8j97pegIa95EX2uQuekrwzrZm+S4WGvSEFSL46y8O8v69XvH+Fjr2du8e+Y/PFvDc0iz4HgJ69A1FHvsxnmby3BOi8B3mmvQ0bQjq7C568eVmnvkNxpr2Rwka+AZnTvAEkJb2RZK69kwS1Ogs02rxgnK6+FVauvRD+Rb4VCgm9hPFovYdBtr2oXBI7wLINvVRHuL4cKra9VP9Evs4uK73kcaC9XAu+vf8Uxr7DmTG9O7BNPhXkzb2jwEO+QyUhvVFk172WuNW9onrFvuDCKb1ECjM+9YTlvWaIFL8icBu94ojoPtxI/b025sS+BnfsvF5dGT6uhAa+M5VBvjTt07zKhRu+02MKvth/xL5pz+y8vbYHPvo/Er6IFxS/kxjXvGj41D7mGB6+lBjEvgvykrzswOs97PAlvqnuE7/PFYC8/N3NPpPGMb6x2cO+stT4u/4K1j0VnDm+Bsk/vkxWtLsTIUO+BnI9vinBw74inBi8HpfNPY1GRb5DyxO/Tm7vuzu9xz5fGVG+AqLDvtzhATqn18I9p+xYvvvEE79xKx0776TGPvi+ZL5qp8O+vWwmPIe2xD13kmy+qtATvxrmRTzyqcg+uGV4vgrRw75vKaM8CBfTPXEdgL5RIEC+jgy0PAGpO75JCYK+fB/Evv8FljxbIO49b/WFviIUFL/TEqk8flDUPsLhi75eaMS+nwPtPCOoAz5ez4++Z3JBviQKAT3Pjx6+l76RvsQ4vDuYteg8Pvbfvoivkb6ZUkK+owqhPOgvC77/oJO+fzTFvojFijzSzSY+sJKXvpPbQr7SdaU8nsP+vYaFmb73ecW+QRSRPJjHMj6aeJ2+e2lDvhGvrTx2Tua93GyfvrNogjtkQps8oPfLvm1in76DmUs+yPozPJBdL782WZ2+B/xoO+3zMbv2Hce+5E+dvvsfRL5R7Cu8M8/GvfhFn74Z48W+jLtLvMrkRD4nO6O+GcpDvgS6DLyAm9W9YDClvs27xb5g5y68th0+PsUkqb7ygUO+IyLku0QK4r1FGau+UMKMO6w7Frzve8++Ag6rvpNFQ77vgo28nHjsvecBrb49B5k73W2gvGm8076p9ay+pLtCvkYv5LzvJgK+Luiuvh8Pxb5OAvm8yWwgPh/Zsr6k4UG+TFffvOrwFL51ybS+T6PEvuwr97wq1Q0+Pri4vsMKQb51euC8qHYnvm6mur6dN8S+w0X7vFaH9j0Qk76+qfETv9uM57yWc84+An7EvkbJw75ZfKW8PGvQPW9oyL7NxBO/7s+UvBepxj6WUc6+aIHDvlJ7KryknLc9kzrSvleqE7+UGg28CBHCPqsi2L7DXcO+wDWHutZQqz3yCty+m+k+vv8lKDrtYVa+rvPdvgddw75eX2i7VxCrPfHb4b673j6+isj1uptRV76RxOO+U2UPPPY/x7tm6AC/oK3jvh3BPr4/UIS8eORZvvWV5b7VKMO+Hy2nvDcbmT0sfem+X3QTv4HtmrxazLg+G2Pvvinfwr6plT+852B/PdpI877XxD2+gicrvPGdb76pLvW+rbXCvu7Ud7w4ymI9khP5vu0/E79GsGW8Rr2vPmn3/r4Of8K+Rm7quy0dPT2dbQG/XRU9vioszLs+u36+D6b3uTeAmTyO0ag8lkO4upurzLji6TS+l5aoPBRtmD4i9227nK2UPFZd2Tx1zz08Ry1Wu2r9WT5AQ9s8dpOLvk+zgTrcb448L5muPCSa6Dy+R686PEpZPhtAszyc1YO+uuK2O1Fi0D47EIk8jg4LvwHPYDwAFRo/oETAO0ifVL9OBNM8iDjQProFMLx0OQm/4NIKPZIZGj+31a+8TQNVv70iPD1pc9A+3xQcvXLVC7/ifF09ZqpZPg7USL1pFYi+reZuPUoT0T4Lml69dtISvzMtiD3fKls+28qGvcDNmL568ZA9QBGjPEYEk71hBN+8NMKRPc9LMr68IZS9IMh3PnOgij1DfLy+dDiKvYbQAj+KGHc9HUMwvpOUar3fk0o+sP5oPViXu77JX1q9Rp7xPvT6Sj1Jpi6+GrczvV/SJj4hAj09XOS6vpheJr3+A+I+CBsfPVhqLb7+NAK9WIALPnw7ET2PudY8AxjuvK1MKr4uYRM9s4Ysvr2rBL1SwO892JMFPenn3TxgKfa87xw+vu3LBz0NnSu+NEoKvQ94xz2PIvQ8ylvlPKRPAr1/rlK+4Lj4POnsZD5mKhO9UB8Cv9WsDj0aQu08Bc48vSWYaL42DBE9bXEpvo1pT72Nd089BX4DPSGI+DxSQ0u9C+CDvkL6BT1x+ie+7VxgvdsfnDwhFPE853e3vkDNXr34gpY+Zl62PPdhJr5LuEa9cLF7vF2/mzyRuLa+dvpHvQr0hT66jUI8+h8Nv7yLMr123Ak/tJ5eOkUMtr4mbga9cwFuPqExzbvu2Qy/lcfmvIG7Az97cY28Wpq1vop4krwbRVo+ZY7HvNokI75nGF+8n1CuvcWo4byGX7W+V/x6vHgeUD5u2Q29dbgivjtjOLz+/cC98t0aveAttb4xRFe8wI1HPgzbN70WXSK+wGgXvGi80L1B2ES9b00WPZ
POOLzohcu+tNZBvUUQIr7mh528IAHevcTNTr1cyrS+jEqvvPxtNj7xumu9NEQMvzsakrzJku0+yU6MvfOAtL5wKAy89rwpPoC/mr0FFCG++66ru1a4BL7xMKG9ZjYbPetPALzBDtm+maOfvSPjIL5SnYW8VPIIvhUTpr17ObS+p4abvDhyHT4VfrS9G/4Lv6hVgrzAeuE+LeTKvSf4s76Duei76SgSPvNJ2b3g6Au/sC6LuzTI3T6mrO+94OKzvtuykDshfQ4+uBD+vRTpC7835Os7Rc/dPro5Cr6W+LO+s/OBPKM+Ej6hbBG+nF8gvuJZmTyISxS+vqEUvnybHD24n4E8l/LcvknZE7734CC+wa7rO5gfCb78EBe+6mS0vnTskzsI5iQ+OEgevssMIb5vdf070VgFvsyAIb7xjxo93x2oO8hF1771uiC+BT0hvk3dVruoLwG+f/Qjvlx/tL6DHL67zHUpPsosK74GGyG+UlAju5gdBL6nZS6+2G+0vvs1prsdySY+U501vrT+IL5O3+26To4Gvp7VOL72YrS+dpWRu4iQJD7HDEC+R+cgvs4MoboYkwi+mkRDvlpYtL4qV3+7S7wiPlZ7Sr4Y1CC+9ic8ulM6Cr7Hsk2+uU+0vn34X7tdPyE+K+lUvp3EIL40l4y5048Lvk0gWL7USLS+hDZEuxIPID5qVl++Z7ggvi+ZCjk2nQy+TY1ivnlDtL7bUiu7yyIfPjPDab4cryC+MXsBOi1qDb7n+Wy+VHQcPfyjFLsye9y+pDFsvoeoIL6kRDK8+fwNvjZob76kJrS+ULRfvBIvGj71nHa+sEwgvpJdLrzw5hW+lIwFbnVtcHmUjAVkdHlwZZSTlIwCZjSUSwBLAYeUUpQoSwOMATyUTk5OSv////9K/////0sAdJRiS8hLBIaUjAFDlHSUUpQuAAAAAA==", "actions": [0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0], "rewards": [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0], "prev_actions": [0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 1], "prev_rewards": [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0], "dones": [false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, true, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false], "new_obs": 
"BCJNGGhAFg0AAAAAAABQFg0AgIAFlQsNAAAAAAAAjCByYXkuY2xvdWRwaWNrbGUuY2xvdWRwaWNrbGVfZmFzdJSMEV9udW1weV9mcm9tYnVmZmVylJOUKJaADAAAAAAAABEXAj3cM02+tGD7OR+xyT0JWeM8p/Osu0mBIDvZtEa+qHviPNM+Tb6vrbu6+5TLPc6kwTy+g8q+GtAROoaEyD7O1oA8gjtNvtJxCTztA8s9+ABAPJ6nsbtU7Sk8NnpDviw6PjzHgU2+pb/WO5Qf1z0X7vg7P5C5u0bLDTxiBT6+ATj1O7y6Tb6B+aE7JvDgPZcaYzsO67+7bvTpOxyjOb5ZbVs7/OdNvkhLZjtMvOg9+YcwuszlxLtWn707wjM2vucIULoJkUE+qQYSO2cE8L6owUM77qfIu4I16rvZmzO+8bo7O0v4Tb5OlC68z4/rPYvSl7rzzsq+suMIvGADzz5yxhS8DLFNvvXUjLl6RN89s5hWvK4/vLsNk/o6DCk8vp56WLwyt02+tR3nuutT4D1rJ428Tr/Kvh0Y4DmIps0+egjOvCayTb6Jngo8pnbfPcjx7rzOhMC7mV8uPOA6Ob416O+8jqFBPh4z5jttvfC+FO3QvLSqxD4Z5Ru7VLpCvyL+kbz4j0E+2RyQvFr8776iC2a8NXm/u3vo3Lyr/jm+zvVnvARiTb7Uqvq8ksHRPWPXlLwKk6S7BuPpvESNTL4KqpW8nYRMvrtOBb3olqs9GWO2vEDqyb5Q4/y8cGe7Pvr/9rwYl0u+LOvAvDyZgj2EyQu9aalcu4N4tryO63G+IRAMvXLrSr6QLd28ZwhKPe1LHL1ZrC67vxjVvM3lgL7Sgxy94iRKvgtY/rzDlAU9u68svVm9yL4sAPm8RHShPgfOTL2uPkm+2VXFvJq8WDyF51y9BFjIvgArw7ygr5g+mvV8vR+LSL72TpK8byP3ul6Ahr3kRfa5C56SvDOtmb5LhYa9IQVIvjrLw7y/r1e8f4WOvZ27x75j88W8NzSLPgeAnr0DUUe+zGeZvLcE6LwHeaa9DRtCOrsLnrx5Wae+Q3GmvZHCRr4BmdO8ASQlvZFkrr2TBLU6CzTavGCcrr4VVq69EP5FvhUKCb2E8Wi9h0G2vahcEjvAsg29VEe4vhwqtr1U/0S+zi4rveRxoL1cC769/xTGvsOZMb07sE0+FeTNvaPAQ75DJSG9UWTXvZa41b2iesW+4MIpvUQKMz71hOW9ZogUvyJwG73iiOg+3Ej9vTbmxL4Gd+y8Xl0ZPq6EBr4zlUG+NO3TvMqFG77TYwq+2H/EvmnP7Ly9tgc++j8SvogXFL+TGNe8aPjUPuYYHr6UGMS+C/KSvOzA6z3s8CW+qe4Tv88VgLz83c0+k8YxvrHZw76y1Pi7/grWPRWcOb4GyT++TFa0uxMhQ74Gcj2+KcHDviKcGLwel809jUZFvkPLE79Obu+7O73HPl8ZUb4CosO+3OEBOqfXwj2n7Fi++8QTv3ErHTvvpMY++L5kvmqnw769bCY8h7bEPXeSbL6q0BO/GuZFPPKpyD64ZXi+CtHDvm8pozwIF9M9cR2AvlEgQL6ODLQ8Aak7vkkJgr58H8S+/wWWPFsg7j1v9YW+IhQUv9MSqTx+UNQ+wuGLvl5oxL6fA+08I6gDPl7Pj75nckG+JAoBPc+PHr6XvpG+xDi8O5i16Dw+9t++iK+RvplSQr6jCqE86C8Lvv+gk75/NMW+iMWKPNLNJj6wkpe+k9tCvtJ1pTyew/69hoWZvvd5xb5BFJE8mMcyPpp4nb57aUO+Ea+tPHZO5r3cbJ++s2iCO2RCmzyg98u+bWKfvoOZSz7I+jM8kF0vvzZZnb4H/Gg77fMxu/Ydx77kT52++x9EvlHsK7wzz8a9+EWfvhnjxb6Mu0u8yuREPic7o74ZykO+BLoMvICb1b1gMKW+zbvFvmDnLry2HT4+xSSpvvKBQ74jIuS7RArivUUZq75Qwow7rDsWvO97z74CDqu+k0VDvu+CjbyceOy95wGtvj0HmTvdbaC8abzTvqn1rL6ku0K+Ri/kvO8mAr4u6K6+Hw/Fvk4C+bzJbCA+H9myvqThQb5MV9+86vAUvnXJtL5Po8S+7Cv3vCrVDT4+uLi+wwpBvnV64Lyodie+bqa6vp03xL7DRfu8Vof2PRCTvr6p8RO/24znvJZzzj4CfsS+RsnDvll8pbw8a9A9b2jIvs3EE7/uz5S8F6nGPpZRzr5ogcO+UnsqvKSctz2TOtK+V6oTv5QaDbwIEcI+qyLYvsNdw77ANYe61lCrPfIK3L6b6T6+/yUoOu1hVr6u892+B13Dvl5faLtXEKs98dvhvrvePr6KyPW6m1FXvpHE475TZQ889j/Hu2boAL+greO+HcE+vj9QhLx45Fm+9ZXlvtUow74fLae8NxuZPSx96b5fdBO/ge2avFrMuD4bY+++Kd/CvqmVP7znYH892kjzvtfEPb6CJyu88Z1vvqku9b6ttcK+7tR3vDjKYj2SE/m+7T8Tv0awZbxGva8+aff+vg5/wr5Gbuq7LR09PZ1tAb9dFT2+KizMuz67fr6kXwK/+GXCvq6ZN7y00ys9m6vMuOLpNL6Xlqg8FG2YPiL3bbucrZQ8Vl3ZPHXPPTxHLVa7av1ZPkBD2zx2k4u+T7OBOtxvjjwvma48JJroPL5Hrzo8Slk+G0CzPJzVg7664rY7UWLQPjsQiTyODgu/Ac9gPAAVGj+gRMA7SJ9Uv04E0zyIONA+ugUwvHQ5Cb/g0go9khkaP7fVr7xNA1W/vSI8PWlz0D7fFBy9ctULv+J8XT1mqlk+DtRIvWkViL6t5m49ShPRPguaXr120hK/My2IPd8qWz7byoa9wM2YvnrxkD1AEaM8RgSTvWEE37w0wpE9z0syvrwhlL0gyHc+c6CKPUN8vL50OIq9htACP4oYdz0dQzC+k5Rqvd+TSj6w/mg9WJe7vslfWr1GnvE+9PpKPUmmLr4atzO9X9ImPiECPT1c5Lq+mF4mvf4D4j4IGx89WGotvv40Ar1YgAs+fDsRPY+51jwDGO68rUwqvi5hEz2zhiy+vasEvVLA7z3YkwU96efdPGAp9rzvHD6+7csHPQ2dK740Sgq9D3jHPY8i9DzKW+U8pE8CvX+uUr7guPg86exkPmYqE71QHwK/1awOPRpC7TwFzjy9JZhovjYMET1tcSm+jWlPvY13Tz0FfgM9IYj4PFJDS70L4IO+QvoFPXH6J77tXGC92x+cPCEU8Tznd7e+QM1evfiClj5mXrY892Emvku4Rr1wsXu8Xb+bPJG4tr52+ke9CvSFPrqNQjz6Hw2/vIsyvXbcCT+0nl46RQy2viZuBr1zAW4+oTHNu+7ZDL+Vx+a8gbsDP3txjbxamrW+iniSvBtFWj5ljse82iQjvmcYX7yfUK69xajhvIZftb5X/Hq8eB5QPm7ZDb11uCK+O2M4vP79wL3y3Rq94C21vjFEV7zAjUc+DNs3vRZdIr7AaBe8aLzQvUHYRL1vTRY9k844vOiFy7601kG9RRAivu
aHnbwgAd69xM1OvVzKtL6MSq+8/G02PvG6a700RAy/OxqSvMmS7T7JToy984C0vnAoDLz2vCk+gL+avQUUIb77rqu7VrgEvvEwob1mNhs9608AvMEO2b6Zo5+9I+MgvlKdhbxU8gi+FROmvXs5tL6nhpu8OHIdPhV+tL0b/gu/qFWCvMB64T4t5Mq9J/izvoO56LvpKBI+80nZveDoC7+wLou7NMjdPqas773g4rO+27KQOyF9Dj64EP69FOkLvzfk6ztFz90+ujkKvpb4s76z84E8oz4SPqFsEb6cXyC+4lmZPIhLFL6+oRS+fJscPbifgTyX8ty+SdkTvvfgIL7Brus7mB8JvvwQF77qZLS+dOyTOwjmJD44SB6+ywwhvm91/TvRWAW+zIAhvvGPGj3fHag7yEXXvvW6IL4FPSG+Td1Wu6gvAb5/9CO+XH+0voMcvrvMdSk+yiwrvgYbIb5SUCO7mB0EvqdlLr7Yb7S++zWmux3JJj5TnTW+tP4gvk7f7bpOjga+ntU4vvZitL52lZG7iJAkPscMQL5H5yC+zgyhuhiTCL6aREO+Wli0vipXf7tLvCI+VntKvhjUIL72Jzy6UzoKvseyTb65T7S+ffhfu10/IT4r6VS+ncQgvjSXjLnTjwu+TSBYvtRItL6ENkS7Eg8gPmpWX75nuCC+L5kKOTadDL5NjWK+eUO0vttSK7vLIh8+M8NpvhyvIL4xewE6LWoNvuf5bL5UdBw9/KMUuzJ73L6kMWy+h6ggvqREMrz5/A2+NmhvvqQmtL5QtF+8Ei8aPvWcdr6wTCC+kl0uvPDmFb6x0Xm+L/mzvo1VXrwJWRI+lIwFbnVtcHmUjAVkdHlwZZSTlIwCZjSUSwBLAYeUUpQoSwOMATyUTk5OSv////9K/////0sAdJRiS8hLBIaUjAFDlHSUUpQuAAAAAA==", "action_prob": [0.7756350636482239, 0.612125813961029, 0.7756436467170715, 0.3885174095630646, 0.8815892934799194, 0.6356493234634399, 0.7581368088722229, 0.6445144414901733, 0.7527977228164673, 0.6516579389572144, 0.7484222650527954, 0.6572870016098022, 0.2550281286239624, 0.9149109125137329, 0.7610495686531067, 0.36838531494140625, 0.8866824507713318, 0.6457452774047852, 0.7557428479194641, 0.354793906211853, 0.8928549289703369, 0.668721079826355, 0.26495277881622314, 0.08777742087841034, 0.9563407301902771, 0.9214736819267273, 0.7911956906318665, 0.5715227723121643, 0.8113599419593811, 0.46872013807296753, 0.8444793224334717, 0.5148441195487976, 0.8360649943351746, 0.4795854985713959, 0.8509207963943481, 0.5608783960342407, 0.7989695072174072, 0.5798503160476685, 0.7905892133712769, 0.4081323742866516, 0.8741276264190674, 0.623587965965271, 0.7611796855926514, 0.362978458404541, 0.8880791068077087, 0.33047470450401306, 0.8981094360351562, 0.29254063963890076, 0.9087594747543335, 0.7488438487052917, 0.6212868094444275, 0.7758610248565674, 0.4188234508037567, 0.8572765588760376, 0.567042350769043, 0.8044984340667725, 0.4615945816040039, 0.8433027267456055, 0.46471813321113586, 0.8454596996307373, 0.5439815521240234, 0.8119010329246521, 0.4629794657230377, 0.8489465117454529, 0.44517841935157776, 0.8587840795516968, 0.41622641682624817, 0.8716719150543213, 0.623212456703186, 0.7551260590553284, 0.3523470163345337, 0.8933494091033936, 0.6929872632026672, 0.30915963649749756, 0.8949166536331177, 0.680034875869751, 0.729566752910614, 0.6561221480369568, 0.7493543028831482, 0.3704221546649933, 0.12344934046268463, 0.9468966126441956, 0.8860577344894409, 0.6618601679801941, 0.7321107387542725, 0.6663256287574768, 0.7279475331306458, 0.3306964635848999, 0.8942685127258301, 0.30627503991127014, 0.9030520915985107, 0.726478636264801, 0.6553636193275452, 0.7468454241752625, 0.6281380653381348, 0.7662988901138306, 0.4013173282146454, 0.8649280667304993, 0.4087170958518982, 0.8637696504592896, 0.40591245889663696, 0.8660423159599304, 0.6065399050712585, 0.7637236714363098, 0.6049522161483765, 0.23537707328796387, 0.9175612330436707, 0.7841527462005615, 0.43730372190475464, 0.8495862483978271, 0.562748908996582, 0.7911516427993774, 0.4525795578956604, 0.8426398634910583, 0.551680862903595, 0.7927470803260803, 0.35888081789016724, 0.8957231640815735, 0.6768782138824463, 0.7230063676834106, 0.6946155428886414, 0.29071521759033203, 0.09575942158699036, 0.9542697072029114, 0.08424267917871475, 0.9584065675735474, 0.9297118782997131, 0.17087379097938538, 0.9403315782546997, 
0.8702080249786377, 0.6345273852348328, 0.26655271649360657, 0.9053779244422913, 0.30668720602989197, 0.8960314393043518, 0.3381303548812866, 0.8890500068664551, 0.6409712433815002, 0.7704555988311768, 0.604201078414917, 0.7936940789222717, 0.5633335709571838, 0.18421390652656555, 0.934615969657898, 0.8475006222724915, 0.4323859214782715, 0.8700407147407532, 0.6335121393203735, 0.7338801622390747, 0.6772472858428955, 0.3027171492576599, 0.8957626819610596, 0.31503212451934814, 0.8953123092651367, 0.6868011951446533, 0.7152880430221558, 0.678637683391571, 0.7223745584487915, 0.6722382307052612, 0.2721746265888214, 0.9104246497154236, 0.7503911852836609, 0.3685230016708374, 0.8811478018760681, 0.6412357687950134, 0.24763242900371552, 0.9166780710220337, 0.7716503143310547, 0.3969353437423706, 0.8723196387290955, 0.38646960258483887, 0.8790027499198914, 0.3632466793060303, 0.8891210556030273, 0.6716672778129578, 0.278555303812027, 0.9067665338516235, 0.7231478095054626, 0.6862280368804932, 0.2860000431537628, 0.9065383076667786, 0.7248930335044861, 0.6793040037155151, 0.7249322533607483, 0.679607093334198, 0.7244158983230591, 0.680435061454773, 0.7234086394309998, 0.6817116141319275, 0.7219614386558533, 0.6833726763725281, 0.720114529132843, 0.6853653192520142, 0.717898428440094, 0.6876468658447266, 0.2846648395061493, 0.9079360961914062, 0.7332726120948792, 0.6636269092559814, 0.740626871585846], "advantages": [-0.9090890884399414, -1.127325415611267, -1.4979877471923828, -1.7770287990570068, -0.9555240273475647, -2.489248037338257, -2.7517693042755127, -3.15179705619812, -3.3717381954193115, -3.820936679840088, -3.996267795562744, -4.496857166290283, -4.625176906585693, -3.5213992595672607, -5.18390417098999, -5.796466827392578, -5.304215431213379, -6.538675785064697, -6.4541015625, -7.231523036956787, -6.871771335601807, -7.994811058044434, -7.805431842803955, -6.283897399902344, -3.234347105026245, -6.558821678161621, -8.778098106384277, -9.84870719909668, -9.216177940368652, -10.44337272644043, -10.484763145446777, -11.097777366638184, -10.046831130981445, -11.628363609313965, -10.332232475280762, -12.08273696899414, -12.596323013305664, -12.58414077758789, -13.297853469848633, -13.064221382141113, -11.137035369873047, -13.369363784790039, -14.462318420410156, -13.730950355529785, -11.508196830749512, -13.889410018920898, -11.53868293762207, -13.903693199157715, -11.468234062194824, -13.756550788879395, -15.538286209106445, -13.697656631469727, -15.57264518737793, -16.732980728149414, -15.741827964782715, -13.910992622375488, -15.761367797851562, -17.291433334350586, -15.994025230407715, -17.592037200927734, -16.347126007080078, -15.096712112426758, -16.604503631591797, -18.157657623291016, -17.127408981323242, -18.591249465942383, -17.779804229736328, -19.132686614990234, -18.557809829711914, -18.199935913085938, -19.238386154174805, -20.373611450195312, -20.18935203552246, -20.186275482177734, -19.96440887451172, -20.91289710998535, -21.710935592651367, -21.88909339904785, -22.617202758789062, -22.915283203125, -22.948945999145508, -22.311323165893555, -23.52683448791504, -24.237096786499023, -24.829599380493164, -25.225664138793945, -25.7595272064209, -26.241615295410156, -26.46504020690918, -27.02013397216797, -27.275028228759766, -27.77414321899414, -28.14527130126953, -28.757505416870117, -29.06433868408203, -29.748523712158203, -29.99290657043457, -30.538898468017578, -31.161794662475586, -31.647903442382812, -32.37772750854492, -32.81642150878906, -33.64552307128906, 
-34.683815002441406, -34.75780487060547, -35.85243606567383, -36.564735412597656, -36.82870101928711, -36.87998962402344, -37.2624626159668, -38.26713943481445, -39.47517395019531, -39.5224723815918, -39.87834930419922, -40.99521255493164, -42.26863098144531, 10.817754745483398, 11.002147674560547, 10.40869426727295, 10.572393417358398, 10.015243530273438, 10.132020950317383, 11.14441204071045, 13.743602752685547, 10.855884552001953, 13.747347831726074, 10.741920471191406, 9.208016395568848, 10.72360897064209, 9.030326843261719, 8.344115257263184, 8.487646102905273, 9.67080307006836, 7.91990852355957, 8.974520683288574, 7.3722710609436035, 8.30816650390625, 6.837972164154053, 6.667980670928955, 6.378802299499512, 6.317411422729492, 5.931929111480713, 5.999307632446289, 7.51806116104126, 5.8334760665893555, 5.213784694671631, 5.706218242645264, 4.912362575531006, 5.191457271575928, 4.591506004333496, 4.683869361877441, 5.95835018157959, 4.115170955657959, 5.216538429260254, 3.5854387283325195, 3.6977410316467285, 3.1883649826049805, 3.5112197399139404, 2.8282289505004883, 3.3661630153656006, 5.586302280426025, 3.419462203979492, 2.3931772708892822, 2.6711783409118652, 2.0857045650482178, 3.3379178047180176, 5.920799732208252, 3.55290150642395, 2.003239393234253, 1.6574945449829102, 1.8609412908554077, 1.3016539812088013, 1.6781538724899292, 0.9470158219337463, 1.4215353727340698, 3.134845018386841, 5.222073078155518, 3.1220991611480713, 1.2644894123077393, 2.969377040863037, 4.856164932250977, 3.004380464553833, 1.257417917251587, 2.876025676727295, 1.206418752670288, 2.7077701091766357, 1.131942868232727, 2.4993934631347656, 1.0279994010925293, 2.251680374145508, 0.8901981711387634, 1.9660530090332031, 0.715822160243988, 1.6443406343460083, 0.5035478472709656, 1.2885972261428833, 2.169264316558838, 1.168209195137024, 0.23358669877052307, 0.8329963684082031]}
+{"type": "SampleBatch", "eps_id": [1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1085404201, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167], "obs": 
"BCJNGGhAFg0AAAAAAABQFg0AgIAFlQsNAAAAAAAAjCByYXkuY2xvdWRwaWNrbGUuY2xvdWRwaWNrbGVfZmFzdJSMEV9udW1weV9mcm9tYnVmZmVylJOUKJaADAAAAAAAALHReb4v+bO+jVVevAlZEj5PgoC+EfIfvr2AL7wsth2+xRuCvr7Ls755+GG8AYQKPlO0hb5CzAu/RqU1vA/e2D7bS4u+JJ2zvtxmK7tCeAI+euOOvp+/C78A0Yy4XarWPoF6lL4YmLO+KUkIPBuaAT4GEpi+TccLvyjCMTwJ/tc+W6mdvvK7s74r/508xcwHPphBob6k8x++iLmzPAadHb4S26K+Ggm0vrCBmjwMGRU+23SmvkaMIL68XLI8tnQQvtsPqL4iVbS+0j+bPPYzIj4pq6u+qyQhvqgztTwuUgO+sEetvvahtL6/MKA8gnIvPobksL6nwCG+EUO8PIzB672dgrK+itQWPclmqTwbCc2+FSKyvi9kIr6KlE88GIbPvc3Bs76v5hQ9XmAuPKCsx76BYrO+jbsivuhWOjsHc8C9GgO1viBKtb4JsHw6XWZMPk6juL7SySK+4WaiO+r9vb0LRLq+M1W1vn01SzvvTk4+d+S9vgPoIr5NpOk7nMq4vYGFv75UaLW+MIKuOz2bUT5QJsO+rhYjvg1UGjxmwLC90sfEvpNOEj2dGPw7K4PAvi9qxL7UViO+C8U1OfSvpb1VDMa+xJK1vspbvbpv61g+/K3JvhJSI75X+jY7B4KmvRZQy75LlbW+XtOYOqNaWT7L8c6+6WAjviFQsTvs86O9C5TQvpehtb47snk7XXlbPv411L54gyO+4KcEPAUAnr2X2NW+2be1vmLA1jvTT18+/HrZvl66I77l1TI83YqUvSEe277fqA89mREbPEI3ub4wwtq+wgYkvlIfEjseXoe9GGbcvpD7Dj2/8m06wFW3vpYK3L6KEiS+3OzMu99Whb2crt2+eOS1vgSY97uuAmc+5lHhvsveI76Gfke7WkGOvWn14r6k6g890ESRu47oub5NmeK+zMIjvqSdP7ywFpO9iDzkvsHaED1kJle81YC8vtPf474ANWw+YuWnvCM7Kr8ig+G+78wSPQRsCr248sG+LiXhvteYIr4hdCm9KJbGvW7F4r4uKBc9qGUxvYwIzr6wZOK+L1whvshcUr2mUP29xQHkvu8vtL67fly9/wMcPlSc574F0x++iANQvYySIL57Nem+l2yzvg7cXL25l/Q9IszsvutLHr5tE1O9ylFCvl9h7r6Up7K+Fp9ivY2XsD0V9PG+lRALv8mOW73azrg+G4T3vtzbsb4M/T29q2xUPb4S+76kQxu+b705vdqagr44oPy+RDCxvgOjTr3PCrw8thUAv7PeGb6gwUy97waSvqraAL9udLC+5R5kvUWTDrxkngK/gFUYvmTVZL11CKO+YWEDv+Sjr7456369u7AzvQQjBb+Fhwm/nkGBvfwHaj4r4we/l7muvkfKb73Kt6q9dqIJvz8WCb9unna9qdxCPllgDL+l262+qAdnvbdP971sHQ6/h6kIvyDscL0hTB0+ItkQv2MErb6tVmS9Os0gvg+UEr+dPgi/5TNxvd3A8D2hTRW/vPc5v5SSZ711Pcg+yQUZv8yza7+/iEe9k1kqP5W8Hb8tljm/rwURvXkztz7JciG/kGdrv4hr57zInSM/Digmv/9aOb9EaX28wOSsPhPdKb9JWQe/Y8IOvLaBJD0Qkiy/UbuqvkqZAbz7sIK+Ikcuv7ZJB7/CPVW8TAkPPc/7ML/VMDm/YMxJvMKdpT78rzS/+TEHv5+bv7tql9w8L2Q3v/UfOb/o9a27b7GiPgYYO7+EJwe/eCWJOizBvzwEzD2/+WmqvgGCxjors4m+RoA/v+opB7/9QX273F7GPFA0Qr/IGTm/u4Rdu1OgoT4H6EW/IAprv6o+QDtydxs/bptKv08ZOb/1Dnc8po2hPiNPTr8WNAe/7TmvPDt24jxhA1G/RDs5v2nBszxYbKc+w7dUv6tcB7+4VOk8OzopPdFsV79VCau+nBnwPIUPeL6sIlm/zJIHvwxpyDwR23M9zthbv6Rtq74hKtI85bxmvqqPXb+8wQe/Gz+tPJ5Imj29RmC/AsWrvtaWuTwrqFe+eP5hv8USEL6HFZc8wfr9vuK2Yr9l1109Dp8LPAVqSL/kb2K/oXYQvk3R6buxm/m+zihjvwwVrL46VIq8EtJJvlbhZL/QHRC+zJ6qvHF3/b7OmWW/V+5fPci6+7xZUku/JlJlv3FgD76A7T692egCv6sJZr9DPau+ndFovYRWb74LwGe/B1oHv0H3e72I4CU9C3Vqv1sQOb/3pXi91lWgPpIobr/E5wa/of5eve6WADtJ23C/c6U4v3vVXr290I0+rIx0v/OBBr+5JEi9b4EEvVo9d79kRTi/J8tKvaVkej7S7Hq/WQhqvxnDNr2jbAU/EZt/v5ntN7/6EAy96/BbPmSkgb8r2wW/KvH0vFg3tb0Q+4K/2Jinvj24Ab3s6Me+ltGDv5ChBb+LtCG9lgPdva4nhb+Vbje/uosqveEuMD5E/Ya/eDlpv39zHL3sEuc+NDZJvZfbArzW1f08AcM3vbPdSb2TbFC+Hnz2PCjOgz46ilq9IScRvM5UED03zdG8BURbvZwyPj62Ow49SCudvsYMTL1fiCG8IizqPDogaLuJ20y9EUc9PpKX6Tzo+pK+Ibc9vaTnLrz2jro8UwltPAKXPr0MiDw+xu28PC25ir7igS+9+KM5vI+JkDx+7+w8gW8wvXKpU76rRpU8QXynPldeQb0VCkK8Et/KPEnRJD22VkK9E047Ps120TzVZXq+tFozvf5cwT6CZqk8EG8Hv4tqFL1MoTo+DnIlPApua75cfAW9kiTBPko3tDvq9gS/dirNvOlmOj5ULKC7mWFmvm9Xr7zLS1W8/84ZvHZ8hz15ebG8Q5w6PnshBLxk+2q+6Z2TvFc1Ubw1U0+85G94PXy1lbys6To+N3M7vJ6ocb4Sm2+8CnHBPvJjhLx8Qgi/N5vnu5RQOz69mNu8L5d6viBzX7tzZ0G8d9gBvaNYIT0L7G67rWlTvr48/bzh0KQ+5cP+uzC+MrwFf8i81t2gPBz1Ary5m1K+YkfFvDznmz4kWka8tF4nvMpjk7xCh407FLNJvCL/Ub6irpK8iiGVPgBzhrzt9R68pOtFvDSF5bvwCYi8iws+Pjc3SLz2YJu+YkNTvMVEGbw81JW82lpwvB5UVrxaHFG+izuYvApciz48n4y8o6EQvGxGV7xAcde8fhGOvLH0Pj6N5F+8K22lvuIHX7yDWgq8A+KkvIpcDr1BzGG8jSZQvsuTqrzJxYA++jOSvD7DALy+XoG83D9DvZt9k7xE/D8+GC6JvBPKsL7Fi2m8tknyu67AwbxKTm29B/hrvIOSQD6xPsu8+0m3voFYLrwFmtu73fICvfD4lb2vijC8eGVBPpTyCL0ObMC+Tk/lu9zVxD4wvCe9tcktv7sdNTrjeEI+5Vhfvfd2zL4LGpM7D/ePu+
IHgL3/pf698TiQOxBVS76vH4W99IgXPrNv4TkKEMm+7x9+vQnF1j46RfO7nnpJvvfCW70n99w9xRs6vDc1brpI7FK9RQ1Ovv9nOryp8Ue+OWhjvUMnmT1pY3q8AnLHvu9HXb1h6rI+RAS9vOVXRr6Np0C9WcUkPWnA3LycmQc77Vs9vVCpiL6gady8+PpEvpM5U719gDE88e37vDSoYjtfVlK9U2eYvuFc+7wLekO+07hqvdoCsbzOUQ29dDLFvvl9bL05QoE+/94svbpTFL+Jz1e9M8EGP/NVXL2lZMS+b7AsvWrMXj42wnu9G2RAvoXdGr2vbrS9MZOFvXLPw74mFSK96AhFPmM9lb15Qj++4FESvd5c5r3i45y9wM0QPMqIG713GtW+Noecvd8uPr6BoT29UAALvq8ipL2so8K+QMBIvaljET7mtLO9LxUTv60ePb0SbtY+aD3LvbXxwb6gzxq99D3lPWLB2r3jjDu+MKQRvRsMRb7mQeK9f2jBvrdnIb2/67U95rrxvVWBEr/YIBq9Zta8PuCVBL5J2MC+H9T3vGwhhD2cTAy+ZUISvxdC7bz867E+AAAYvl9pwL68UrS8l7g7PUuyH74RExK/edCsvMS4qT7mYSu+ffJDv7sBbbzUEB4/5g47vu7xEb8Duwq7lPajPtq7Rr7l+L++9IGMOzxK3DymaU6+7TM4vn4hnjsrZIe+xBhSvvkJwL5TuPK5x9XzPD/HWb4Q+BG/GscKOb8EpT6xdGW+WQnAvoCP1zvq+vI8JSNtvopdOL67/+o73JmFvhnTcL4eI8C+afX/Oj1ACz2Vgni+5wYSvySKLDs9k6c+lIwFbnVtcHmUjAVkdHlwZZSTlIwCZjSUSwBLAYeUUpQoSwOMATyUTk5OSv////9K/////0sAdJRiS8hLBIaUjAFDlHSUUpQuAAAAAA==", "actions": [1, 0, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1], "rewards": [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0], "prev_actions": [0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0], "prev_rewards": [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0], "dones": [false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, true, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false], "new_obs": 
"BCJNGGhAFg0AAAAAAABQFg0AgIAFlQsNAAAAAAAAjCByYXkuY2xvdWRwaWNrbGUuY2xvdWRwaWNrbGVfZmFzdJSMEV9udW1weV9mcm9tYnVmZmVylJOUKJaADAAAAAAAAE+CgL4R8h++vYAvvCy2Hb7FG4K+vsuzvnn4YbwBhAo+U7SFvkLMC79GpTW8D97YPttLi74knbO+3GYru0J4Aj56446+n78LvwDRjLhdqtY+gXqUvhiYs74pSQg8G5oBPgYSmL5Nxwu/KMIxPAn+1z5bqZ2+8ruzviv/nTzFzAc+mEGhvqTzH76IubM8Bp0dvhLbor4aCbS+sIGaPAwZFT7bdKa+Rowgvrxcsjy2dBC+2w+oviJVtL7SP5s89jMiPimrq76rJCG+qDO1PC5SA76wR62+9qG0vr8woDyCci8+huSwvqfAIb4RQ7w8jMHrvZ2Csr6K1BY9yWapPBsJzb4VIrK+L2QivoqUTzwYhs+9zcGzvq/mFD1eYC48oKzHvoFis76NuyK+6FY6OwdzwL0aA7W+IEq1vgmwfDpdZkw+TqO4vtLJIr7hZqI76v29vQtEur4zVbW+fTVLO+9OTj535L2+A+givk2k6Tucyri9gYW/vlRotb4wgq47PZtRPlAmw76uFiO+DVQaPGbAsL3Sx8S+k04SPZ0Y/Dsrg8C+L2rEvtRWI74LxTU59K+lvVUMxr7EkrW+ylu9um/rWD78rcm+ElIjvlf6NjsHgqa9FlDLvkuVtb5e05g6o1pZPsvxzr7pYCO+IVCxO+zzo70LlNC+l6G1vjuyeTtdeVs+/jXUvniDI77gpwQ8BQCevZfY1b7Zt7W+YsDWO9NPXz78etm+XrojvuXVMjzdipS9IR7bvt+oDz2ZERs8Qje5vjDC2r7CBiS+Uh8SOx5eh70YZty+kPsOPb/ybTrAVbe+lgrcvooSJL7c7My731aFvZyu3b545LW+BJj3u64CZz7mUeG+y94jvoZ+R7taQY69afXivqTqDz3QRJG7jui5vk2Z4r7MwiO+pJ0/vLAWk72IPOS+wdoQPWQmV7zVgLy+09/jvgA1bD5i5ae8IzsqvyKD4b7vzBI9BGwKvbjywb4uJeG+15giviF0Kb0olsa9bsXivi4oFz2oZTG9jAjOvrBk4r4vXCG+yFxSvaZQ/b3FAeS+7y+0vrt+XL3/Axw+VJznvgXTH76IA1C9jJIgvns16b6XbLO+DtxcvbmX9D0izOy+60sevm0TU73KUUK+X2HuvpSnsr4Wn2K9jZewPRX08b6VEAu/yY5bvdrOuD4bhPe+3Nuxvgz9Pb2rbFQ9vhL7vqRDG75vvTm92pqCvjig/L5EMLG+A6NOvc8KvDy2FQC/s94ZvqDBTL3vBpK+qtoAv250sL7lHmS9RZMOvGSeAr+AVRi+ZNVkvXUIo75hYQO/5KOvvjnrfr27sDO9BCMFv4WHCb+eQYG9/AdqPivjB7+Xua6+R8pvvcq3qr12ogm/PxYJv26edr2p3EI+WWAMv6Xbrb6oB2e9t0/3vWwdDr+HqQi/IOxwvSFMHT4i2RC/YwStvq1WZL06zSC+D5QSv50+CL/lM3G93cDwPaFNFb+89zm/lJJnvXU9yD7JBRm/zLNrv7+IR72TWSo/lbwdvy2WOb+vBRG9eTO3PslyIb+QZ2u/iGvnvMidIz8OKCa//1o5v0RpfbzA5Kw+E90pv0lZB79jwg68toEkPRCSLL9Ru6q+SpkBvPuwgr4iRy6/tkkHv8I9VbxMCQ89z/swv9UwOb9gzEm8wp2lPvyvNL/5MQe/n5u/u2qX3DwvZDe/9R85v+j1rbtvsaI+Bhg7v4QnB794JYk6LMG/PATMPb/5aaq+AYLGOiuzib5GgD+/6ikHv/1BfbvcXsY8UDRCv8gZOb+7hF27U6ChPgfoRb8gCmu/qj5AO3J3Gz9um0q/Txk5v/UOdzymjaE+I09OvxY0B7/tOa88O3biPGEDUb9EOzm/acGzPFhspz7Dt1S/q1wHv7hU6Tw7Oik90WxXv1UJq76cGfA8hQ94vqwiWb/Mkge/DGnIPBHbcz3O2Fu/pG2rviEq0jzlvGa+qo9dv7zBB78bP608nkiaPb1GYL8Cxau+1pa5PCuoV754/mG/xRIQvocVlzzB+v2+4rZiv2XXXT0Onws8BWpIv+RvYr+hdhC+TdHpu7Gb+b7OKGO/DBWsvjpUirwS0km+VuFkv9AdEL7Mnqq8cXf9vs6ZZb9X7l89yLr7vFlSS78mUmW/cWAPvoDtPr3Z6AK/qwlmv0M9q76d0Wi9hFZvvgvAZ78HWge/Qfd7vYjgJT0LdWq/WxA5v/eleL3WVaA+kihuv8TnBr+h/l697pYAO0nbcL9zpTi/e9Vevb3QjT6sjHS/84EGv7kkSL1vgQS9Wj13v2RFOL8ny0q9pWR6PtLser9ZCGq/GcM2vaNsBT8Rm3+/me03v/oQDL3r8Fs+ZKSBvyvbBb8q8fS8WDe1vRD7gr/YmKe+PbgBvezox76W0YO/kKEFv4u0Ib2WA929rieFv5VuN7+6iyq94S4wPkT9hr94OWm/f3McvewS5z5SUom/9SM3v2v17rzRXRY+s91JvZNsUL4efPY8KM6DPjqKWr0hJxG8zlQQPTfN0bwFRFu9nDI+PrY7Dj1IK52+xgxMvV+IIbwiLOo8OiBou4nbTL0RRz0+kpfpPOj6kr4htz29pOcuvPaOujxTCW08Apc+vQyIPD7G7bw8LbmKvuKBL734ozm8j4mQPH7v7DyBbzC9cqlTvqtGlTxBfKc+V15BvRUKQrwS38o8SdEkPbZWQr0TTjs+zXbRPNVler60WjO9/lzBPoJmqTwQbwe/i2oUvUyhOj4OciU8Cm5rvlx8Bb2SJME+Sje0O+r2BL92Ks286WY6PlQsoLuZYWa+b1evvMtLVbz/zhm8dnyHPXl5sbxDnDo+eyEEvGT7ar7pnZO8VzVRvDVTT7zkb3g9fLWVvKzpOj43czu8nqhxvhKbb7wKccE+8mOEvHxCCL83m+e7lFA7Pr2Y27wvl3q+IHNfu3NnQbx32AG9o1ghPQvsbrutaVO+vjz9vOHQpD7lw/67ML4yvAV/yLzW3aA8HPUCvLmbUr5iR8W8POebPiRaRry0Xie8ymOTvEKHjTsUs0m8Iv9RvqKukryKIZU+AHOGvO31Hryk60W8NIXlu/AJiLyLCz4+NzdIvPZgm75iQ1O8xUQZvDzUlbzaWnC8HlRWvFocUb6LO5i8ClyLPjyfjLyjoRC8bEZXvEBx17x+EY68sfQ+Po3kX7wrbaW+4gdfvINaCrwD4qS8ilwOvUHMYbyNJlC+y5OqvMnFgD76M5K8PsMAvL5egbzcP0O9m32TvET8Pz4YLom8E8qwvsWLaby2SfK7rsDBvEpObb0H+Gu8g5JAPrE+y7z7Sbe+gVguvAWa27vd8gK98PiVva+KMLx4ZUE+lPIIvQ5swL5OT+W73NXEPjC8J721yS2/ux01OuN4Qj7lWF+993bMvgsakzsP94+74geAvf+l/r3xOJA7EFVLvq
8fhb30iBc+s2/hOQoQyb7vH369CcXWPjpF87ueekm+98JbvSf33D3FGzq8NzVuukjsUr1FDU6+/2c6vKnxR745aGO9QyeZPWljerwCcse+70ddvWHqsj5EBL285VdGvo2nQL1ZxSQ9acDcvJyZBzvtWz29UKmIvqBp3Lz4+kS+kzlTvX2AMTzx7fu8NKhiO19WUr1TZ5i+4Vz7vAt6Q77TuGq92gKxvM5RDb10MsW++X1svTlCgT7/3iy9ulMUv4nPV70zwQY/81VcvaVkxL5vsCy9asxePjbCe70bZEC+hd0ava9utL0xk4W9cs/DviYVIr3oCEU+Yz2VvXlCP77gURK93lzmveLjnL3AzRA8yogbvXca1b42h5y93y4+voGhPb1QAAu+ryKkvayjwr5AwEi9qWMRPua0s70vFRO/rR49vRJu1j5oPcu9tfHBvqDPGr30PeU9YsHaveOMO74wpBG9GwxFvuZB4r1/aMG+t2chvb/rtT3muvG9VYESv9ggGr1m1rw+4JUEvknYwL4f1Pe8bCGEPZxMDL5lQhK/F0LtvPzrsT4AABi+X2nAvrxStLyXuDs9S7IfvhETEr950Ky8xLipPuZhK7598kO/uwFtvNQQHj/mDju+7vERvwO7CruU9qM+2rtGvuX4v770gYw7PErcPKZpTr7tMzi+fiGeOytkh77EGFK++QnAvlO48rnH1fM8P8dZvhD4Eb8axwo5vwSlPrF0Zb5ZCcC+gI/XO+r68jwlI22+il04vrv/6jvcmYW+GdNwvh4jwL5p9f86PUALPZWCeL7nBhK/JIosOz2Tpz6bGIK+tSvAvgxiFjxtLRE9lIwFbnVtcHmUjAVkdHlwZZSTlIwCZjSUSwBLAYeUUpQoSwOMATyUTk5OSv////9K/////0sAdJRiS8hLBIaUjAFDlHSUUpQuAAAAAA==", "action_prob": [0.6537982225418091, 0.7480552792549133, 0.35675248503685, 0.8867892622947693, 0.34405601024627686, 0.8922792077064514, 0.32081732153892517, 0.900262176990509, 0.7113386988639832, 0.6747792959213257, 0.731661856174469, 0.6502748727798462, 0.7509464621543884, 0.6238872408866882, 0.7694880962371826, 0.40483903884887695, 0.8611644506454468, 0.41056203842163086, 0.8615332841873169, 0.594387412071228, 0.7814565896987915, 0.5843990445137024, 0.7871851325035095, 0.5718697309494019, 0.7941381931304932, 0.443391352891922, 0.8467154502868652, 0.5640233159065247, 0.7952790260314941, 0.5560895204544067, 0.7993307113647461, 0.5452324748039246, 0.8047934770584106, 0.5313411355018616, 0.8115816712379456, 0.4857438802719116, 0.822441816329956, 0.4813523292541504, 0.8271824717521667, 0.5324507355690002, 0.8056276440620422, 0.46916747093200684, 0.8332657814025879, 0.44921472668647766, 0.1553802192211151, 0.938803493976593, 0.8697534799575806, 0.35048192739486694, 0.8889293074607849, 0.7061596512794495, 0.6530068516731262, 0.7467408180236816, 0.6007909178733826, 0.7833701372146606, 0.45725783705711365, 0.831234872341156, 0.5036577582359314, 0.831467866897583, 0.44791221618652344, 0.8557592034339905, 0.38761278986930847, 0.8783908486366272, 0.6753438711166382, 0.6768608689308167, 0.7193108797073364, 0.6272778511047363, 0.7571271657943726, 0.5746353268623352, 0.7901135683059692, 0.4809771776199341, 0.18898361921310425, 0.9214778542518616, 0.1946869194507599, 0.9212345480918884, 0.8093040585517883, 0.5057886242866516, 0.8030053973197937, 0.5049028992652893, 0.8032615780830383, 0.4986136257648468, 0.8062254190444946, 0.5137951374053955, 0.787377655506134, 0.4885161519050598, 0.1918935924768448, 0.9216127991676331, 0.8230971097946167, 0.43188774585723877, 0.8354758620262146, 0.6013743281364441, 0.6997705101966858, 0.6221972107887268, 0.6802588701248169, 0.6396864652633667, 0.33783024549484253, 0.1504456251859665, 0.9276463985443115, 0.8611058592796326, 0.3066280484199524, 0.12906235456466675, 0.9389564990997314, 0.8911041021347046, 0.770799994468689, 0.5167937874794006, 0.7597653269767761, 0.5536934733390808, 0.7326628565788269, 0.5852333307266235, 0.29404735565185547, 0.8741856813430786, 0.6928068995475769, 0.3840416669845581, 0.8318026065826416, 0.6462596654891968, 0.3603768050670624, 0.8428529500961304, 0.4279141128063202, 0.8768115043640137, 0.6203874945640564, 0.7618516087532043, 0.6473038196563721, 0.745112955570221, 0.6674724221229553, 0.731564462184906, 0.3179558217525482, 0.9055505990982056, 0.7141088843345642, 0.314890056848526, 0.8963093757629395, 
0.3072061538696289, 0.9014476537704468, 0.714926540851593, 0.6900429725646973, 0.7282330393791199, 0.6736318469047546, 0.2557612657546997, 0.918137788772583, 0.7811395525932312, 0.40977048873901367, 0.8699129819869995, 0.4324297606945038, 0.863659679889679, 0.44798779487609863, 0.8595702648162842, 0.5433138608932495, 0.8255105018615723, 0.4877859652042389, 0.843747615814209, 0.5010747313499451, 0.8442225456237793, 0.5334628820419312, 0.8223601579666138, 0.4510672390460968, 0.8635732531547546, 0.411889910697937, 0.8779820799827576, 0.3639357388019562, 0.10682330280542374, 0.9534547328948975, 0.9134367108345032, 0.7707125544548035, 0.41555944085121155, 0.8564978837966919, 0.5332484245300293, 0.8350723385810852, 0.5360636115074158, 0.8027918934822083, 0.41831517219543457, 0.8751989006996155, 0.35682371258735657, 0.8933011889457703, 0.7044357657432556, 0.3348449766635895, 0.8834495544433594, 0.6413818597793579, 0.759468674659729, 0.6051672101020813, 0.21704421937465668, 0.9261196851730347, 0.8165813088417053, 0.504275918006897, 0.813959002494812, 0.46903300285339355, 0.8487949371337891, 0.5728330016136169, 0.777737021446228, 0.5921180248260498, 0.769203782081604, 0.6028798818588257, 0.23415561020374298, 0.9180946350097656, 0.7853320837020874, 0.43054547905921936, 0.8574968576431274, 0.5692688226699829, 0.7983015179634094, 0.451924592256546, 0.849402666091919, 0.5448324084281921, 0.8133159875869751], "advantages": [13.10532283782959, 13.717348098754883, 13.11357307434082, 12.386098861694336, 12.870925903320312, 12.268976211547852, 12.5468111038208, 12.066407203674316, 12.141822814941406, 12.150308609008789, 11.878232955932617, 11.793551445007324, 11.579999923706055, 11.404545783996582, 11.245341300964355, 10.980606079101562, 11.080321311950684, 10.78364372253418, 10.843639373779297, 10.602641105651855, 10.547697067260742, 10.17845630645752, 10.16540813446045, 9.72918701171875, 9.754876136779785, 9.252480506896973, 9.068318367004395, 9.003835678100586, 9.083035469055176, 8.512777328491211, 8.624432563781738, 7.99703311920166, 8.138105392456055, 7.453843116760254, 7.621213436126709, 6.880115509033203, 6.397294998168945, 6.515971660614014, 6.003903865814209, 6.151065349578857, 6.356845855712891, 5.554975509643555, 4.978507995605469, 5.169253349304199, 4.590817451477051, 5.088864803314209, 4.489740371704102, 4.663486957550049, 4.215814113616943, 4.354055881500244, 4.629079818725586, 3.8671088218688965, 4.1565375328063965, 3.4034528732299805, 3.6891298294067383, 3.7238264083862305, 3.053161382675171, 2.3295602798461914, 2.5724568367004395, 1.9142333269119263, 2.086911201477051, 1.526548147201538, 1.599770188331604, 1.6244124174118042, 0.9612365365028381, 0.9450251460075378, 0.3171690106391907, 0.23584073781967163, -0.3332659602165222, -0.5028881430625916, -0.7209151983261108, -1.1251213550567627, -1.6873725652694702, -2.0129201412200928, -2.6844675540924072, -3.4554150104522705, -3.9211435317993164, -4.36565637588501, -4.60673189163208, -5.413742542266846, -5.6569695472717285, -6.502972602844238, -6.857282638549805, -7.545830726623535, -7.813848495483398, -7.862382888793945, -8.984331130981445, -9.98784351348877, -10.111977577209473, -11.2002592086792, -11.644142150878906, -12.368237495422363, -12.832771301269531, -13.56917667388916, -14.060062408447266, -13.622071266174316, -12.163074493408203, -14.433777809143066, -16.19291114807129, -15.418161392211914, -13.598258972167969, -16.11433982849121, -18.21383285522461, -19.677331924438477, -20.355953216552734, -21.185104370117188, -21.927322387695312, 
-22.685121536254883, -23.491466522216797, -23.77059555053711, -25.13155174255371, -25.83732795715332, -25.3378849029541, -27.16590690612793, -28.12954330444336, -28.48270606994629, 10.988126754760742, 10.701079368591309, 10.53136920928955, 11.316716194152832, 10.090356826782227, 10.812117576599121, 9.648663520812988, 10.314984321594238, 9.204715728759766, 9.106141090393066, 8.722440719604492, 9.25240421295166, 10.888703346252441, 8.776987075805664, 10.434324264526367, 8.351195335388184, 7.403763771057129, 7.910619258880615, 6.9539289474487305, 7.478797912597656, 9.292895317077637, 7.13918924331665, 6.120083332061768, 6.030979156494141, 5.648583889007568, 5.506397724151611, 5.166876792907715, 4.978718280792236, 4.672268867492676, 5.469540596008301, 4.21985387802124, 3.95556902885437, 3.720889091491699, 4.641626358032227, 3.2715249061584473, 2.915140151977539, 2.7774789333343506, 3.8700737953186035, 2.345771551132202, 3.581430196762085, 1.9568488597869873, 3.3888776302337646, 6.570765495300293, 3.4980804920196533, 1.574702501296997, 0.7716585397720337, 1.068275809288025, 0.34088781476020813, 1.334826946258545, 0.08975276350975037, 0.013926140032708645, -0.17896458506584167, 1.4879997968673706, -0.21621160209178925, 1.8389824628829956, -0.05637194961309433, -0.8427844047546387, -0.4693267345428467, -1.1085247993469238, 0.2136649638414383, -1.1392728090286255, 0.5177525281906128, 3.271544933319092, 1.1374188661575317, -0.5443050861358643, -1.4161131381988525, -0.27429434657096863, 1.9518009424209595, 0.21473242342472076, -1.1469491720199585, 0.5348289012908936, -0.9374476671218872, 0.7671730518341064, -0.7198417782783508, -1.671157956123352, -0.7234440445899963, 0.6133512854576111, 1.7369062900543213, 0.6506044864654541, -0.5637465119361877, 0.42721620202064514, 1.1776400804519653, 0.3433816134929657, -0.6211967468261719]}
+{"type": "SampleBatch", "eps_id": [1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 1411378167, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550], "obs": 
"BCJNGGhAFg0AAAAAAABQFg0AgIAFlQsNAAAAAAAAjCByYXkuY2xvdWRwaWNrbGUuY2xvdWRwaWNrbGVfZmFzdJSMEV9udW1weV9mcm9tYnVmZmVylJOUKJaADAAAAAAAAJsYgr61K8C+DGIWPG0tET2F8IW+6RESv0j/ITzveak+R8iLvm5PwL4kO4c8z9EpPeigj77fDzm+GAaOPGXfe76qepG+i8JnPMhySzx1ewi/lVWRvhd/Ob4zAuY64T1yvnQwk77ZqcC+qhBDu6YgaD3kCpe+okUSv4+R8bpWY7I+t+Scvs+gwL7M8ac7lOVhPfm+oL61SBK/iBbMO5vqsj7smKa+E7bAvu+MWDy/knA9mnOqvs5aEr/fy2s8mgu2PkZOsL6IW0S/FyewPDAaJz/6KLi+EnwSv5GMDT1o27s++gS+vqhTwb4xmys91LuuPdDiwb4BwxK/d5gyPbomyD6nwce+ivPBvqmeUj00/OU9r6LLvrDjPL610Vs9sqgnvj2Gzb73t8K+DWhOPe/gFD4za9G+M2g+vhZRWj2MHwa+pFLTvv03CTw+lk89VxHQvq88077pm08+yksuPTnDLr81KdG+Fz7nO3m+7DyC+MC+tRbRvoT8QL5Q/q487BiavcEE076OjMS+ZqqiPBx7ZT4V89a+6ZZBvu9hxzxQ+369rOLYvokvuzvsLr08kb6xvrPT2L53T00+Gk6EPIDpIb8axta+wuqoO2BzZTuTY6u+l7jWvruTQr4aTlG7xMQnvbWq2L7eKsW+2n6Du6RdgD40nNy+LYUUvzFAgzr2yAo/DY3iviwlxb4YDUI8hcF/Pm9+5r4oqUK+T/KJPGBoIL3EcOi+cZGcO7yHgzwFKae+PmTovrd1TD7vExw8IjMdv9NY5r6go5A7G400u3gEo75BTea+a11MPg54FbxcqBy/FELkvlUjljvR/q68n/CkvhE25L6Fy0K+ssbjvA6YFL2+KOa+GxfFvky46bzKcX0+2Bnqvp5jFL82K8G8HvMHP1kJ8L7LtcS+b1JUvFuUbD6B+PO+HkIUv9edCLyhBAU/q+b5vvSNxL4YlgY7s69lPgfV/b5pPBS/20rWO5qDBD984QG/8Z3EvtFhijxtd2g+0tgDvy2uQb6mk688ePF2vbzQBL/6s7k787KlPGM4sb5OyQS/S0pCviH6WTxMF0G9/8EFv9gkxb6gh0o8zLN/Pq+6B796qkK+YS2OPOrxH73bswi/UFzFvovHhzxbo4Q+Gq0Kv58pQ75CObI8Uz/ovOmmC7+wHIo7JpStPHzQoL5ioQu/kspDvng8dDzKgXK8/5sMv+nnxb7WYm88CqqQPqOWDr8yOUS+T/ylPOP0s7vNkQ+/l8pRO/cVpTx+W5W+m40Pv2XISj43lWo8EPYTvwuKDr/4oDE7LsU0O+7Gj75+hg6/0hFFvpBMO7vwrlA8voIPv4xqxr66miq7oumbPrB+Eb9Y/US+LYhkO2R0NDzVehK/O2zGvuP3cjsdDpw+y3YUv08YRb4MniA8QqRZPBRzFb+qISQ7X/gkPON1jb7LbxW/ijhKPvfelDv61hC/82wUvxR3Fjs26927RBiLvvFpFL+vWkW+5fpHvKOUmjyOZhW/Jn3Gvv3LQbzNh50+sGIXv5cARb5l9Lm7GO04PNpeGL8Lqzg7wI6yuxr9kL4oWxi/5NZEvlISNrw/4f47HFcZv1o9xr7UhTO8ygeYPptSG78phES+U3Kku9XpVTolThy/oB/Gvmzpo7tndZU+V0kev6D+FL9iKVs6TkAVPzJEIb/EF8a+N71MPJvIlD5QPyO/xZBEvvb6lTyZD/Y66jokv3XNPTuzSZY8CeiRvh83JL8nGkW+GTJPPO4tXDxpMyW/r/AgO2qZUzyg6oy+MTAlv+MgSj5Q0/I7zVUQv3gtJL+96cg+XVh9u3epWr8hKyK/SxNKPqCcq7wGDBC/eSghvwX7JDuk5gO9IaiNviwlIb9v1ko+5pAavWVPFL+LISC/XwxmO3cGSr0n+Zi+8Rwgv8p0Q75AgGK9apy0vCAXIb//M8W+nU5kvWdggT73DyO/ZNVBvlqbT71VpGm9Eggkv9htxL6ZR1S9jn1gPu7+Jb/dVUC+CFJCvYf4tr0e9Sa/xLTDvqejSb1EhEA+IOoovz7sPr7qPDq90VX1vYLeKb9cSRg8JQ1EvdNZ2r5T0im/e489vsz8Zr2Y1Bi+9sQqvzo/wr7ENnO9dyAAPjy2LL8p3zu+uvZovVIkPr62pi2/J2bBvtUseL2WTbU90JUvv2VqEr9K7HC95BW5PnaDMr/vhsC+ME9TvUlTUD1UcDS/GwQSv5AkT71iVqc+7ls3vyLGQ79sXjS90WcaP0tGO78hrBG/hfUCvY8GmD4jMD6/cze/vhpF1bzEtrm7pxlAv49BNr7RMta88uacvvACQb+21b6+HjQEvbVBtbx660K/B2g1viMEBr0vTqa+rdNDv1jgljwBoCC9oawgv4m7Q7/xXDS+cgpUvbPysb5nokS/KrC9vjODcL1WtZK9AYhGvw7DMr5+YXa9WrbDvtFsR78l0ry+79iKvSV8370zUEm/mukwvi1Rj70RRdi+fRsKPUwbFb23BOS84esDvRAgBz3n7CI+l0vpvNrFqr7GKBQ9DpG1Pqf4D724kyK/vjUxPRXZIz72/kO9ghK1vllRPj1i6Qy9r/dgvaiLnL3hfzs9LC9qvrY6Z71ZL0s+ycMoPXtnBr17+Va90GTkvaMTJj3gmGi+PBxgvcYXKD4MeBM9lyQAvbGpUr1SwxS+9ecQPZsMZ75ckF69u+IFPiLY/DwCAte+YdpTvbHFzz6xCrg8FYBlvgecMr0AOMc9XFKTPFms6LwHpCq9TtpVvhKrjjwlQWS+vL87vcc1kD2JS1Q8q63VvgX7Nb02U7I+8BSXO0LvYr7Uchm98sQrPR3/OjmwFdW+XgMWvY8npT7QcwW89Vocv0ct97yefRo/KcumvFGZ1L6GTZS8TFeaPkPT6rz1Lhy/0NNFvCyfFj8tZCe9QRdOv3ICobmUkWA/JFdpvXgjHL9LNY08/J0VPwGnjb0dgNS+j/bsPKEvmD4Dp569VaFhvtLUDj3FX2I8eK2nvej21L6U9g89dXSiPvu2uL1/p2K+vPQpPdELEz3rx8G9Wd3bvJzlLD38P3m+WeHCvebgY7749Bg9Uh5/PdL+y703IuW8LQ8ePaagX74dJM29M/1kvkkrDD2LkbA99kzWvayj1r5ZOxM972vHPsZ4572/A2a+qCMzPTjr3T0grPC9WjfXvhsEPD0IO9Q+4PEAvtFSZ74S+V09Pe0LPkGSBb4kawG9xSppPQzIDb7pNwa+8fFovhbTXT3Tvi8+luAKvnLmB71a4ms9sfPTvYqOC74Vk2q+92djPZvAUz6QPxC+zYAOvadYdD2rAou99/UQvu9AIz4vyW49JqyuvhuyDb4xZRW9mtZSPdlK+7xVcQ6+uKkhPktTUD2kBZ2+njULvjxYtD6sMzc9GzkVv+P+A77jRSA+UXMHPcSNjb5KygC+mf4fvY
2a4Twut9g8FZcBvhdkHz4i8OU8vMuDvgHO/L0AQCO9a8O7PG8tND3tb/699aUePnD4wjwjK3e+Xhf4vWcMsz5qbJs858UGv3TE6b0jBh4+ZVYKPExTab5KcuO9JdqyPoWxfjvEkgS/ZiPVvfTXHT57CtS7nlVlvhTTzr0L37I+UWgzvPzHBL/Mg8C95hgePgevrryp+Gq+4TC6vWGOJb10R9S8eRVnPbTYu73IzXC+JwnLvAV/rj6JesW9kR7cvm0yk7zlJSA/lBbXvY0ucL4Zz7K7rpKnPguy4L3/MCG917yOOva5Bj2xTuK9s30fPmr25Do71oS+ge3bvX5bIb2+lGG7qWMKPZSK3b0ShB8+5Es1uyochb4jKde9ZP4gvbiDAryzXgI9SMXYvSmsHz58K/C7u9aGvjxi0r0PGSC9u2FOvPM53TwV/NO9X6Rvvl+IRbzDoaE+BZLdvWinHr00Lby7Q4GdPCwo372qOSA+gZOvuzjvjL53v9i99QAevXT8Mbzk0oA89VPavTV2ID5M1Sy8AYyPvtTo073vvxy9AlqEvFYAEzwbetW90dkgPq/hgrzi2JO+/grPvULeGr1UMbK8imCYunWX0L0YMm6+FmKyvE+0kT6VHtq9vVEYvf3Bg7wO/HO8haTbvQ+lbb6XMoa84JuLPgAm5b0Daxa9qQszvAnfzbwSp+a97lsiPshHO7zEeaS+gyjgvRQqtT68RZK88xQev0Sq0b2syCI+3XH3vG03qb5cJ8u9TEUSvQrMFr2EiUK90J3MvefBIz4SsBq97f+zvvAQxr2x6A293Hw3vWd0kb05fMe9e/QkPlBOPb0pR8G+lIwFbnVtcHmUjAVkdHlwZZSTlIwCZjSUSwBLAYeUUpQoSwOMATyUTk5OSv////9K/////0sAdJRiS8hLBIaUjAFDlHSUUpQuAAAAAA==", "actions": [0, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0], "rewards": [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0], "prev_actions": [1, 0, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 0, 0, 1, 0, 1], "prev_rewards": [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0], "dones": [false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, true, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false], "new_obs": 
"BCJNGGhAFg0AAAAAAABQFg0AgIAFlQsNAAAAAAAAjCByYXkuY2xvdWRwaWNrbGUuY2xvdWRwaWNrbGVfZmFzdJSMEV9udW1weV9mcm9tYnVmZmVylJOUKJaADAAAAAAAAIXwhb7pERK/SP8hPO95qT5HyIu+bk/AviQ7hzzP0Sk96KCPvt8POb4YBo48Zd97vqp6kb6Lwmc8yHJLPHV7CL+VVZG+F385vjMC5jrhPXK+dDCTvtmpwL6qEEO7piBoPeQKl76iRRK/j5HxulZjsj635Jy+z6DAvszxpzuU5WE9+b6gvrVIEr+IFsw7m+qyPuyYpr4TtsC+74xYPL+ScD2ac6q+zloSv9/LazyaC7Y+Rk6wvohbRL8XJ7A8MBonP/oouL4SfBK/kYwNPWjbuz76BL6+qFPBvjGbKz3Uu6490OLBvgHDEr93mDI9uibIPqfBx76K88G+qZ5SPTT85T2vosu+sOM8vrXRWz2yqCe+PYbNvve3wr4NaE497+AUPjNr0b4zaD6+FlFaPYwfBr6kUtO+/TcJPD6WTz1XEdC+rzzTvumbTz7KSy49OcMuvzUp0b4XPuc7eb7sPIL4wL61FtG+hPxAvlD+rjzsGJq9wQTTvo6MxL5mqqI8HHtlPhXz1r7plkG+72HHPFD7fr2s4ti+iS+7O+wuvTyRvrG+s9PYvndPTT4aToQ8gOkhvxrG1r7C6qg7YHNlO5Njq76XuNa+u5NCvhpOUbvExCe9tarYvt4qxb7afoO7pF2APjSc3L4thRS/MUCDOvbICj8NjeK+LCXFvhgNQjyFwX8+b37mviipQr5P8ok8YGggvcRw6L5xkZw7vIeDPAUpp74+ZOi+t3VMPu8THDwiMx2/01jmvqCjkDsbjTS7eASjvkFN5r5rXUw+DngVvFyoHL8UQuS+VSOWO9H+rryf8KS+ETbkvoXLQr6yxuO8DpgUvb4o5r4bF8W+TLjpvMpxfT7YGeq+nmMUvzYrwbwe8wc/WQnwvsu1xL5vUlS8W5RsPoH4874eQhS/150IvKEEBT+r5vm+9I3EvhiWBjuzr2U+B9X9vmk8FL/bStY7moMEP3zhAb/xncS+0WGKPG13aD7S2AO/La5BvqaTrzx48Xa9vNAEv/qzuTvzsqU8Yzixvk7JBL9LSkK+IfpZPEwXQb3/wQW/2CTFvqCHSjzMs38+r7oHv3qqQr5hLY486vEfvduzCL9QXMW+i8eHPFujhD4arQq/nylDvkI5sjxTP+i86aYLv7AcijsmlK08fNCgvmKhC7+SykO+eDx0PMqBcrz/mwy/6efFvtZibzwKqpA+o5YOvzI5RL5P/KU84/Szu82RD7+XylE79xWlPH5blb6bjQ+/ZchKPjeVajwQ9hO/C4oOv/igMTsuxTQ77saPvn6GDr/SEUW+kEw7u/CuUDy+gg+/jGrGvrqaKrui6Zs+sH4Rv1j9RL4tiGQ7ZHQ0PNV6Er87bMa+4/dyOx0OnD7LdhS/TxhFvgyeIDxCpFk8FHMVv6ohJDtf+CQ843WNvstvFb+KOEo+996UO/rWEL/zbBS/FHcWOzbr3btEGIu+8WkUv69aRb7l+ke8o5SaPI5mFb8mfca+/ctBvM2HnT6wYhe/lwBFvmX0ubsY7Tg82l4YvwurODvAjrK7Gv2QvihbGL/k1kS+UhI2vD/h/jscVxm/Wj3GvtSFM7zKB5g+m1IbvymERL5TcqS71elVOiVOHL+gH8a+bOmju2d1lT5XSR6/oP4Uv2IpWzpOQBU/MkQhv8QXxr43vUw8m8iUPlA/I7/FkES+9vqVPJkP9jrqOiS/dc09O7NJljwJ6JG+HzckvycaRb4ZMk887i1cPGkzJb+v8CA7aplTPKDqjL4xMCW/4yBKPlDT8jvNVRC/eC0kv73pyD5dWH27d6lavyErIr9LE0o+oJyrvAYMEL95KCG/BfskO6TmA70hqI2+LCUhv2/WSj7mkBq9ZU8Uv4shIL9fDGY7dwZKvSf5mL7xHCC/ynRDvkCAYr1qnLS8IBchv/8zxb6dTmS9Z2CBPvcPI79k1UG+WptPvVWkab0SCCS/2G3EvplHVL2OfWA+7v4lv91VQL4IUkK9h/i2vR71Jr/EtMO+p6NJvUSEQD4g6ii/Puw+vuo8Or3RVfW9gt4pv1xJGDwlDUS901navlPSKb97jz2+zPxmvZjUGL72xCq/Oj/CvsQ2c713IAA+PLYsvynfO7669mi9UiQ+vramLb8nZsG+1Sx4vZZNtT3QlS+/ZWoSv0rscL3kFbk+doMyv++GwL4wT1O9SVNQPVRwNL8bBBK/kCRPvWJWpz7uWze/IsZDv2xeNL3RZxo/S0Y7vyGsEb+F9QK9jwaYPiMwPr9zN7++GkXVvMS2ubunGUC/j0E2vtEy1rzy5py+8AJBv7bVvr4eNAS9tUG1vHrrQr8HaDW+IwQGvS9Opr6t00O/WOCWPAGgIL2hrCC/ibtDv/FcNL5yClS9s/KxvmeiRL8qsL2+M4NwvVa1kr0BiEa/DsMyvn5hdr1atsO+0WxHvyXSvL7v2Iq9JXzfvTNQSb+a6TC+LVGPvRFF2L6mMkq/NNG7vmKeoL2ZPBy+ECAHPefsIj6XS+m82sWqvsYoFD0OkbU+p/gPvbiTIr++NTE9FdkjPvb+Q72CErW+WVE+PWLpDL2v92C9qIucveF/Oz0sL2q+tjpnvVkvSz7Jwyg9e2cGvXv5Vr3QZOS9oxMmPeCYaL48HGC9xhcoPgx4Ez2XJAC9salSvVLDFL715xA9mwxnvlyQXr274gU+Itj8PAIC175h2lO9scXPPrEKuDwVgGW+B5wyvQA4xz1cUpM8WazovAekKr1O2lW+EquOPCVBZL68vzu9xzWQPYlLVDyrrdW+Bfs1vTZTsj7wFJc7Qu9ivtRyGb3yxCs9Hf86ObAV1b5eAxa9jyelPtBzBbz1Why/Ry33vJ59Gj8py6a8UZnUvoZNlLxMV5o+Q9PqvPUuHL/Q00W8LJ8WPy1kJ71BF06/cgKhuZSRYD8kV2m9eCMcv0s1jTz8nRU/AaeNvR2A1L6P9uw8oS+YPgOnnr1VoWG+0tQOPcVfYjx4rae96PbUvpT2Dz11dKI++7a4vX+nYr689Ck90QsTPevHwb1Z3du8nOUsPfw/eb5Z4cK95uBjvvj0GD1SHn890v7LvTci5bwtDx49pqBfvh0kzb0z/WS+SSsMPYuRsD32TNa9rKPWvlk7Ez3va8c+xnjnvb8DZr6oIzM9OOvdPSCs8L1aN9e+GwQ8PQg71D7g8QC+0VJnvhL5XT097Qs+QZIFviRrAb3FKmk9DMgNvuk3Br7x8Wi+FtNdPdO+Lz6W4Aq+cuYHvVriaz2x89O9io4LvhWTar73Z2M9m8BTPpA/EL7NgA69p1h0PasCi7339RC+70AjPi/Jbj0mrK6+G7INvjFlFb2a1lI92Ur7vFVxDr64qSE+S1NQPaQFnb6eNQu+PFi0PqwzNz0bORW/4/4DvuNFID5Rcwc9xI2NvkrKAL6Z/h+9jZrhPC632DwVlwG+F2QfPi
Lw5Ty8y4O+Ac78vQBAI71rw7s8by00Pe1v/r31pR4+cPjCPCMrd75eF/i9ZwyzPmpsmzznxQa/dMTpvSMGHj5lVgo8TFNpvkpy470l2rI+hbF+O8SSBL9mI9W99NcdPnsK1LueVWW+FNPOvQvfsj5RaDO8/McEv8yDwL3mGB4+B6+uvKn4ar7hMLq9YY4lvXRH1Lx5FWc9tNi7vcjNcL4nCcu8BX+uPol6xb2RHty+bTKTvOUlID+UFte9jS5wvhnPsruukqc+C7Lgvf8wIb3XvI469rkGPbFO4r2zfR8+avbkOjvWhL6B7du9flshvb6UYbupYwo9lIrdvRKEHz7kSzW7KhyFviMp171k/iC9uIMCvLNeAj1Ixdi9KawfPnwr8Lu71oa+PGLSvQ8ZIL27YU688zndPBX8071fpG++X4hFvMOhoT4Fkt29aKcevTQtvLtDgZ08LCjfvao5ID6Bk6+7OO+Mvne/2L31AB69dPwxvOTSgDz1U9q9NXYgPkzVLLwBjI++1OjTve+/HL0CWoS8VgATPBt61b3R2SA+r+GCvOLYk77+Cs+9Qt4avVQxsryKYJi6dZfQvRgybr4WYrK8T7SRPpUe2r29URi9/cGDvA78c7yFpNu9D6Vtvpcyhrzgm4s+ACblvQNrFr2pCzO8Cd/NvBKn5r3uWyI+yEc7vMR5pL6DKOC9FCq1PrxFkrzzFB6/RKrRvazIIj7dcfe8bTepvlwny71MRRK9CswWvYSJQr3Qncy958EjPhKwGr3t/7O+8BDGvbHoDb3cfDe9Z3SRvTl8x7179CQ+UE49vSlHwb4W48C9JJcIvfs6XL2MQsy9lIwFbnVtcHmUjAVkdHlwZZSTlIwCZjSUSwBLAYeUUpQoSwOMATyUTk5OSv////9K/////0sAdJRiS8hLBIaUjAFDlHSUUpQuAAAAAA==", "action_prob": [0.5199524164199829, 0.828216552734375, 0.5140385627746582, 0.18035228550434113, 0.9324020743370056, 0.8260346055030823, 0.4908517897129059, 0.8378863334655762, 0.47002214193344116, 0.8485099077224731, 0.43999046087265015, 0.1383698731660843, 0.9451899528503418, 0.8839677572250366, 0.3263181447982788, 0.9009080529212952, 0.7320062518119812, 0.6222200989723206, 0.7733772397041321, 0.43819767236709595, 0.16584333777427673, 0.9304763078689575, 0.8318285942077637, 0.5231486558914185, 0.8209273815155029, 0.5099197626113892, 0.1928052008152008, 0.9249619841575623, 0.8177680969238281, 0.5048879384994507, 0.17730273306369781, 0.932253360748291, 0.8360740542411804, 0.5454598665237427, 0.2143557071685791, 0.9181556701660156, 0.19947047531604767, 0.9252221584320068, 0.8249421119689941, 0.5356269478797913, 0.20226994156837463, 0.9230884909629822, 0.20110706984996796, 0.9242172241210938, 0.19192714989185333, 0.9272116422653198, 0.823756217956543, 0.5338760018348694, 0.7814702391624451, 0.45769694447517395, 0.8371107578277588, 0.4346748888492584, 0.8461841940879822, 0.592106819152832, 0.7362754344940186, 0.3974513113498688, 0.8586617708206177, 0.6268390417098999, 0.29485592246055603, 0.883057177066803, 0.7175191044807434, 0.3823699951171875, 0.8604565262794495, 0.3734237849712372, 0.8636725544929504, 0.6407763361930847, 0.3074577748775482, 0.8782146573066711, 0.7121286392211914, 0.38472887873649597, 0.8560131788253784, 0.6158082485198975, 0.7195335030555725, 0.39733144640922546, 0.8498152494430542, 0.396481454372406, 0.14997293055057526, 0.9339860677719116, 0.8583023548126221, 0.6429347991943359, 0.673652708530426, 0.6506326794624329, 0.33214858174324036, 0.13844002783298492, 0.9359982013702393, 0.8820406794548035, 0.26333263516426086, 0.8994787335395813, 0.7817967534065247, 0.5110031366348267, 0.7817914485931396, 0.5555487275123596, 0.7534081339836121, 0.5966616272926331, 0.7227045893669128, 0.36503785848617554, 0.8674027919769287, 0.6905041933059692, 0.6285871863365173, 0.7324374914169312, 0.4257999360561371, 0.8285356163978577, 0.4654594361782074, 0.19057074189186096, 0.9185949563980103, 0.8013376593589783, 0.4992280900478363, 0.7963412404060364, 0.46718254685401917, 0.18560022115707397, 0.9243147373199463, 0.8452135324478149, 0.35927814245224, 0.868760347366333, 0.30183133482933044, 0.8911712169647217, 0.415315181016922, 0.12365061789751053, 0.9488186240196228, 0.8980846405029297, 0.716732919216156, 0.659044623374939, 0.7603343725204468, 0.6008750796318054, 0.7970541715621948, 0.46184778213500977, 0.8372563719749451, 0.4994930326938629, 
0.8425098061561584, 0.5560781955718994, 0.7909103631973267, 0.5886493921279907, 0.226023867726326, 0.9201210141181946, 0.2205154150724411, 0.07576221227645874, 0.9616395831108093, 0.935249388217926, 0.8421018123626709, 0.4603724479675293, 0.8659160733222961, 0.602634847164154, 0.768596887588501, 0.6461200714111328, 0.739647626876831, 0.31555724143981934, 0.9092029333114624, 0.2605026662349701, 0.9231515526771545, 0.7939956188201904, 0.5552677512168884, 0.8310684561729431, 0.4811696708202362, 0.8614320755004883, 0.5957092046737671, 0.7622209191322327, 0.6478646397590637, 0.27306675910949707, 0.9024930596351624, 0.714667022228241, 0.698287844657898, 0.6941055059432983, 0.7173634171485901, 0.3238767385482788, 0.892363429069519, 0.31465864181518555, 0.8983293175697327, 0.29090261459350586, 0.9077141880989075, 0.7451168894767761, 0.3576626479625702, 0.1131540983915329, 0.9507300853729248, 0.8918394446372986, 0.6574879884719849, 0.7465486526489258, 0.6497708559036255, 0.7548961043357849, 0.6366959810256958, 0.766804039478302, 0.3823505938053131, 0.8815728425979614, 0.6185122132301331, 0.778278648853302, 0.6005059480667114, 0.7919353246688843, 0.5752210021018982, 0.8085626363754272, 0.4582545757293701, 0.8534752130508423, 0.47200465202331543, 0.8493980169296265, 0.5202584862709045, 0.16641069948673248, 0.9388980269432068, 0.8601585626602173, 0.41317588090896606, 0.8800144791603088, 0.3510757386684418, 0.8989355564117432], "advantages": [13.86804485321045, 13.175607681274414, 13.719236373901367, 14.118341445922852, 14.801876068115234, 14.30284309387207, 14.008076667785645, 13.588269233703613, 13.82448959350586, 13.508691787719727, 13.582075119018555, 13.360777854919434, 13.023870468139648, 12.973896980285645, 12.651301383972168, 12.653926849365234, 12.162520408630371, 11.682069778442383, 11.809429168701172, 11.227328300476074, 10.974092483520508, 11.448655128479004, 10.89777660369873, 10.953437805175781, 11.128548622131348, 10.45494556427002, 10.011300086975098, 10.346689224243164, 9.921895027160645, 10.106973648071289, 10.3045015335083, 10.285378456115723, 9.615755081176758, 8.806126594543457, 8.1491060256958, 8.309440612792969, 7.983579635620117, 8.211711883544922, 7.857133388519287, 8.145045280456543, 8.392200469970703, 8.293962478637695, 7.687145233154297, 7.6393232345581055, 6.946270942687988, 6.941007137298584, 6.160737991333008, 5.158932209014893, 4.272641658782959, 4.648043155670166, 4.963296890258789, 3.9142699241638184, 4.2271246910095215, 3.1382246017456055, 2.133297920227051, 2.4980688095092773, 2.7819325923919678, 1.6436471939086914, 0.5795941948890686, 0.28523167967796326, 0.0589417926967144, 0.3272989094257355, 0.5336767435073853, -0.6180878281593323, -0.4089493155479431, -1.5944722890853882, -2.663055658340454, -2.961348533630371, -3.3038089275360107, -3.1845593452453613, -3.0634560585021973, -4.240775108337402, -5.215975284576416, -5.176181793212891, -5.092827320098877, -6.2793402671813965, -6.199551582336426, -6.284739017486572, -7.427353382110596, -8.703426361083984, -9.742770195007324, -9.774018287658691, -10.799638748168945, -11.069645881652832, -9.942083358764648, -11.582765579223633, -12.444509506225586, -12.274998664855957, -13.322125434875488, -13.908228874206543, -14.255329132080078, -15.217801094055176, -15.626358032226562, -16.52485466003418, -17.006019592285156, -17.83319091796875, -17.938188552856445, -18.991668701171875, -19.71113395690918, -20.28329849243164, -21.123502731323242, -21.606306076049805, -22.64682388305664, -23.177936553955078, -23.443754196166992, 
-24.81296157836914, -25.845666885375977, -26.1445369720459, -27.323701858520508, -27.49441146850586, -26.639259338378906, -28.63787269592285, -30.18505096435547, -29.91583251953125, -31.645811080932617, -31.13223648071289, 14.471434593200684, 15.03508472442627, 17.12688636779785, 15.078790664672852, 14.167634010314941, 14.203629493713379, 14.005565643310547, 13.904351234436035, 13.906237602233887, 13.652503967285156, 14.492643356323242, 13.354822158813477, 13.834983825683594, 13.211174964904785, 13.724588394165039, 13.037026405334473, 13.361952781677246, 14.903573989868164, 12.935162544250488, 14.34245777130127, 16.584243774414062, 13.7691068649292, 12.052897453308105, 11.985196113586426, 11.721636772155762, 11.650335311889648, 13.043719291687012, 11.345305442810059, 12.73244857788086, 11.027462005615234, 10.773356437683105, 10.599921226501465, 10.376823425292969, 10.099592208862305, 11.247505187988281, 9.597785949707031, 10.61972427368164, 9.061104774475098, 9.93366527557373, 11.822427749633789, 9.26461410522461, 11.083911895751953, 13.52344799041748, 10.450069427490234, 8.047859191894531, 9.800442695617676, 7.450640678405762, 9.151045799255371, 11.682793617248535, 8.617552757263184, 11.2544584274292, 8.196488380432129, 10.96518611907959, 7.911034107208252, 5.662945747375488, 4.671236515045166, 5.125830173492432, 4.045373439788818, 4.580812931060791, 6.474283695220947, 4.0751471519470215, 6.000354290008545, 3.5899806022644043, 5.561758518218994, 3.132011651992798, 1.9265995025634766, 2.5898499488830566, 4.613146781921387, 2.1183156967163086, 4.202277183532715, 1.6860337257385254, 3.8475253582000732, 1.3056964874267578, -0.12257706373929977, 0.8492254018783569, -0.6747030019760132, 0.364379346370697, 2.6335201263427734, 5.519772529602051, 2.5334067344665527, -0.07665462046861649, 2.468492269515991, -0.12134800106287003, 2.5339698791503906]}
+{"type": "SampleBatch", "eps_id": [53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 53199550, 1936018840, 1936018840, 1936018840, 1936018840, 1936018840, 1936018840, 1936018840, 1936018840, 1936018840, 1936018840, 1936018840, 1936018840, 1936018840, 1936018840, 1936018840, 1936018840, 1936018840, 1936018840, 1936018840, 1936018840, 1936018840, 1936018840, 1936018840, 1936018840, 1936018840, 1936018840, 1936018840, 1936018840, 1936018840, 1936018840, 1936018840, 1936018840, 1936018840, 1936018840, 1936018840, 1936018840, 1936018840, 1936018840, 1936018840, 1936018840, 1936018840, 1936018840, 1936018840, 1936018840, 1936018840, 1936018840, 1936018840, 1936018840, 1936018840, 1936018840, 1936018840, 1936018840, 1936018840, 1936018840, 1936018840, 1936018840, 1936018840, 1936018840, 1936018840, 1936018840, 1936018840, 1936018840, 1936018840, 1936018840, 1936018840, 1936018840, 1936018840, 1936018840, 1936018840, 1936018840, 1936018840, 1936018840, 1936018840, 1936018840, 1936018840, 1936018840, 1936018840, 1936018840, 1936018840, 1936018840, 1936018840, 1936018840, 1936018840, 1936018840], "obs": 
"BCJNGGhAFg0AAAAAAABQFg0AgIAFlQsNAAAAAAAAjCByYXkuY2xvdWRwaWNrbGUuY2xvdWRwaWNrbGVfZmFzdJSMEV9udW1weV9mcm9tYnVmZmVylJOUKJaADAAAAAAAABbjwL0klwi9+zpcvYxCzL3CQMK9dR9pvppmZL31uDM+75PLvZUI2L7jBVa9tnLmPk3c3L38i2e+uyYxvYPNED5XH+a9Nxb5vCyRJb2SjCi+LF7nvTZTZr4ODTO9SKbrPbOU8L3GQu+8AaApvZerQ770xvG94BRlvlVHOb3BvbQ9wPD6vcgY1r6LDDK9V4+7ProIBr7gyGO+FgoUvfHUdj38lgq+/b/bvE8aD72/cnm+nyMLvsi+Yr4CDyO9oRIbPY+sD763+NS+CfUfvV2soj5jMRi+cZdhvvDtBb2StlQ8arQcvoKUyryq3QS9L2WUvhE2Hb6Eo2C+75scvZaY97s4tCG+Qe7TvmU6Hb1ir4s+Yy4qvoyEX77l4Aa9uOgBvcymLr5sadO+B3oJvS80gD6oGze+eYtevpnt6bygxle9Fo87vpr10r4lj/K8OmZsPk//Q77IURu/OrzMvDmZAz9AbFC+mI/Svj8GcbwLu1o+ZdhYvlMtG7/VByu8jGkAP2xCZb4YF02/NTbVuQZ+Sj+rqnW+NyMbv/qGfDy/EP8+8QmBvlR80r5z4s88I3BXPqE/hb4XjF2+ylryPPzsl73Kdoe+Z4OxvFoz5jxg+7a+mK+HvjBkXr54pas8FUhlvevoib6pP9O+oXmiPI0UeT6DIo6+/fxevvVTyjyunTC9XF2QvseS075oQ8M8praDPp6YlL6p0hu/YGntPP6uDj8+1Jq+KvXTvlNdJD1yQow+dxGfvhESHL9bzjo97jkUP6FPpb6klNS+Dj1qPTwhmj4KkKm+nDxivhtzgT3uitw8NdOrvlN01b5mjYI94H6tPhgYsL7aFmS+l26QPXkXiT0AYLK+M23WvoAskz0YFMM+3qm2vh0qZr61x6I9twTlPRb3uL6J5vy8SFynPZZ5Hr4ESLm+JoNovoAFoT2ScSY+P5u7vuqv2L7irac90Tn1Pq/wv75L2Gq+G0y7PcJGWj7jScK+5O3ZvkIHxD3+gQg/r6XGvl6Obb6d3tk9C1uLPtQFyb5R1B29ngTlPa9ZczzXasm+4IkePl2g5T1+VXa++9THvpNEsj7nxds95OP9vkBExL63XBs+O3bHPd6CL76GtsK+Rc6wPv9wwD3GGd2+Ry2/vu2UGD7bwK49wHrjvaumvb4Hc0G9KTSqPcA8VD56Ir6+kyEWPnixsj3C7229JKK8vrdYS71aULA9F4eFPkgkvb7SmxM+AP+6Pe3vZ7tnqru+z/qsPuTZuj3zOoi+vzS4vlcUCD/l8689OlUHv0vDsr7gsas+qEyaPcA3V743VK++sJEOPtSwkT1go9c9Peetvkyhqj7lAJY9rxAovp19qr62+gY/6EePPVKc3b5tF6W++Kg4P6Uaez3/4TO/grSdvnKBBj+mikE93FzIvitTmL5KyKg+ynshPQdPrL0B85S+NTEGP1mXGj1jbrq+4JSPvkYAOD9Ahvk8JQklv7Q4iL7U8QU/zeaPPDVjr74c3YK+dtynPhuOLzyyyzW9UgN/vpHXBT/uAiE8X9aqvjxOdL71tac+urNOOyg+G73hmG2+DZIHPj0GHTv79II+wuJqvo6rpz4YI/Y7/RIUvdEtZL4JbQc++nHeO46NhD5veGG+wZCnPnYORDyWmgG9kMRavoOwBT8tsDk83RukvpoSUL5TmTc/KlGhO48PHL9/YkG+U6EFP9Uy7rvheaG+wLE2vkxppz6ocV68rM/MvHb/L76/tAU/7aJmvOjUpL4pTSW+NrU3P3kQqLx3eB6/05oWvhq7aT8lvga9hQhrvwToA7655Tc/EvRRvYXMIr+YY+q9igVqP0cGg70nvnG/EfLEvSdOOD8QtKm9+CEsv+x0p70lrAY/nj7FvS9W0L696JG9UzSqPlrp1b2SlBW+9EqEvcBkBz8O5du9M4nwvnhCXb0Kvas+OyPvvUXmWb4QyEG9AZARPoba970lgic98SI2vct0rT6zLfa9ZSSTvi1iGr3fDRU+lvkAvmf1Dr2MdQ698sRCvZOwAb5Ftlw+xFoSvTG2GD4Ojfq9pQDqvTsjBr3kZjS9Jjv/vZTCDD7jvgm9IkkcPsSZ+b31ZUS+UHz6vHsmJr1xugC+2yN2PdmQAL3nSW++xP7+vYYJnz56tRO9TYnavq9F8r09xA8/uqw2vf3Ja74BRdu9SbhvPrCJSb0cZAq9Rq7RvScFub0/Tky9SsJovpRh1b3qZyw+J+1evQly174kfM69SFfaPuaygL1iyGW+hAS9vdp/1D3g44m9Dw/WvoXEuL2vXrs+zAObvfCfHL8wx6m9kP4gPyYTtL3RztS+2wSQvRxhnz50GcW94xQcv8REg73PuxQ/jhLevRHV075H8Va9aqOJPuEE7713Il++nOtAvbiUI73H8fe9uhvTviUxRL3hOHM+pGoEvui+Xb70uzC9lRuPvfrZCL4HctK+YXU2vVvmVT7wRBG+snZcvrVYJb2rsse9h2y1PHszb7xidE87A0CWPCwIszzlzjg+oH5nO2jzi77qmdA8I8VwvBKP/bohjqc8izHOPBXTVr7z8Me6VgOgPlTSqzyL9W+81dSaO7ycnjwJbKk827w4PjaFpzvZLIu+5PrGPH9Dcrxj+6m5Xge4PLKOxDwMqzg+VSYDOXtnir60GuI8fDdyvBkPrbubg7c8oa7fPFW+OD67YJ67GzyLvrk9/TwwT8A+mEwovPjuEL/aYx09fvY4PjLosLzirY2+5C8sPThRaLyPPt68LrgUPIYGKz0orDk+1sHcvFaGlb4Z4Tk9GKhbvHRNBr24sQK88Mc4PbGKOj6+9Aa9FCSfvlC0Rz1rRUy8J2sgvSUw67zYrkY9lvlTvjzFIr3DsoA+mbk1PcDNObzBLQ69znpbvcXLND0L5FK+fZESvUhraT667CM9sBPNviLK/7yEVgI/xhwDPb7pUb6QX6y8Ar1TPoOj5Dz55xy8w36KvNVgvb3VEeM8+FtRvjqlmbzwgUc+epLBPMuOFLzMcnO8DWXUvSsWwDwcvD4+PreKvF9Vzb6rmt48pB4NvClszLxT8+i9ZzHdPPI/UL78Dt+8iBAvPn3fuzz+6AC8VwzDvFJPBb57lbo8HoFPvrZg2Lz3mx4+GmKZPOdS6rsUAL+8dYIVvis2mDwdx06+/uvWvL2SDj4ZQW48ERjLvjEcwLwV2Ng+WYzYOxcNTr6ncHW8DQP9Pd1ZKTsa9MC7SfVMvI4AMr4FoiE7KqZNvpz1grxORus9IzLLuiChs7tgRmC8vTA7vvOQ2bqnY0I+gRaOvMSk9b4ACQw7YDjFPqGx3LyUIke/zzohPHkGQz72ES69XdH8vkijXzw0fYS7YYVWvYXyW74cUF480yNLvuYdaL31h3k94k4dPLEIyb5NIGO9Q5+rPtwr5TpsgEm+qKpHvWRg0TwiVg+7U0fIvq
+{"type": "SampleBatch", "eps_id": [1267110314, 1267110314, 1267110314, 1267110314, 1267110314, 1267110314, 1267110314, 1267110314, 1267110314, 1267110314, 1267110314, 1267110314, 1267110314, 1267110314, 1267110314, 1267110314, 1267110314, 1267110314, 1267110314, 1267110314, 1267110314, 1267110314, 1267110314, 1267110314, 1267110314, 1267110314, 1267110314, 1267110314, 1267110314, 1267110314, 1267110314, 1267110314, 1267110314, 1267110314, 1267110314, 1267110314, 1267110314, 1267110314, 1267110314, 1267110314, 1267110314, 1267110314, 1267110314, 1267110314, 1267110314, 1267110314, 1267110314, 1267110314, 1267110314, 1267110314, 1267110314, 1267110314, 1267110314, 1267110314, 1267110314, 1267110314, 1267110314, 1267110314, 1267110314, 1267110314, 1267110314, 1267110314, 1267110314, 1267110314, 1267110314, 1267110314, 1267110314, 1267110314, 1267110314, 1267110314, 1267110314, 1267110314, 1267110314, 1267110314, 1267110314, 1267110314, 1267110314, 1267110314, 1267110314, 1267110314, 1267110314, 1267110314, 1267110314, 1267110314, 1267110314, 1267110314, 1267110314, 1267110314, 1267110314, 1267110314, 1267110314, 1267110314, 1267110314, 1267110314, 1267110314, 1267110314, 1267110314, 1267110314, 1267110314, 1267110314, 1267110314, 1267110314, 1267110314, 1267110314, 1267110314, 1267110314, 1267110314, 1267110314, 1267110314, 1267110314, 1267110314, 1267110314, 1267110314, 1267110314, 1267110314, 1267110314, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552], "obs": 
"BCJNGGhAFg0AAAAAAABQFg0AgIAFlQsNAAAAAAAAjCByYXkuY2xvdWRwaWNrbGUuY2xvdWRwaWNrbGVfZmFzdJSMEV9udW1weV9mcm9tYnVmZmVylJOUKJaADAAAAAAAAPWufr5zsSi8cpE3PeffjL7x5H6+APlSvjgHIT2OUgA9j46BvqZFPLw7mCM9Gap+vq+sgb5ibjs+tDgPPfRMB7+4mX++775NvMHZxzwObWa+j9t/vh4DVb6B+6I8tx2aPRcPgr62FFi8zU+vPHonWL6qMYK+0J1VviC6jDzVxbQ9hVSEvqbVzr5aMJs82cvFPoR3iL4RJVa+03vaPBAgzD26m4q+TSrPvk/Q6jygIc0+acCOviX0Vr5WOhY9ruXvPbHmkL5ko8++4tIfPY+i1z7NDZW+MQ9YvkpTQj1wahA+6jaXvhDbh7zt4E09NZYLvmNil77YDDY+MLZCPexQ075WkJW+0EiTvK/mID0h89e9eL+VvsbLND5bQxg9kF3FvqLwk753Opy8iF7xPKeGpr2gIpS+2NQzPhUM5Dztprq+QVaSvqnvoryLUag8fnyBvWWKkr5QITM+qvWdPMbesr7Sv5C+vJmnvDxxSTwyfE+9dPWQvunuXL7w1zg8P8h7Pgsrk76YXKq8+LSEPJIJMb2PYZO+nEQyPjZAezwVWKm+MZmRvjAOrrzh3g48H0cIveTQkb426zE+6fcDPCV5pb5rCZC+RwWwvDmG0DqXMeW8v0GQvmbHMT6sLoc6yuujvqJ6jr74U7C8+wWwu05q3rwOs46+o9cxPgzRwbuynqS+yOuMvpgBr7zZQ0q8nZb7vMgjjb5Gf12+HVRUvEIfhD7RWo++3gqsvHSK/7u1eh693pGPvsdpMj7icgy8CeuqviHJjb7EK70+G9Z5vCMTIb+TAIq+IcIyPooB5LxFxq6+9DaIvi8KpbyI9w29kNdrvcRriL6r6Fu+C68SvYdYZT68noq+9MqcvAtWAL0IaaO96NCKvhfuWr5d3wa9nrZPPl4Bjb78RZW8yoLsvATizL0jMY2+Rwdavsrm/LwZzDs+Sl+PvqFIjryd2t689m7zvdKMj749Llm+IVTyvJQTKT7NuJG+QKSHvMJG17zYCAy+NeSRvmBdWL6Tru285hAXPhoOlL4GLYG86YLVvCreHb5wN5S+W49Xvi3F7rzwTQU+RV+WvqdxdbwGcdm80qsvvouGlr7Z1zg+hIz1vE/q8b5YrZS+/T5ovBx7Ib0c80G+gNKUvn24Vb4y/zC9SoW5PaD1lr599FS8d5MpvaqUXL6zF5e+9ntUvvU4O7367YI9qTeZviKjQLw9/DW90qR4vntWmb69KlO+d+BJvbiFET0Sc5u+YRvNvmT3Rr3W4J8+N42fvnG8Ub7HYi29CNeYOySmob6fYhW89QAtvZwlmr4LvqG+8H9QvtGqRb1MOLS8zdOjvhXIy74ueEe9OpmCPirnp74IFU++3ZIyvd5KV71L+am+VU7VuynhNr05pLe+XAqqvknCQT4eQ1S9sUMqv1YaqL6LpKi7l1+FvS5Bx77UJ6i+zBFMvlNQlb2EEfG9PzKqvp5dyb6YIpq9vbwaPj05rr4D6Um+FfKTvTxHKL4hPrC+xErIvkGtmr3Xc9Y9nz+0vj/CR75CY5a9bt5XvgE/tr4DNce+wQWfvWrlbD3yOrq+749Fvk2nnL3cP4S+tDS8vo4Vxr7FO6e92IkYPOUqwL6zqxS/JdqmvebgjD5JHca+J+XEvvSUm71iVCy9Yw3Kvp0ZFL8eTp298B9nPu75z772x8O+Zg+UvbmwuL1V5NO+944TvwPBl71QIzc+VMvZviQ4Rb+ubZC9vf/kPtuu4b4jCBO/kDd8vW1oCD52kOe+C73Bvu1Ncb3xhTa+Z3DrvpPcOr4B6H+9KlP6vsRO7b7T2sC+qPqTvbLBXb4vKvG+nhgSv3HZnL08+lY9NQL3vtjFv74as5q9KdWGvhbY+r6/ihG/eXylvTBgljs4VQC/fqO+vlpMpb0L+p++QT0CvzD0EL+tGLK9iY09vWojBb/ubL2+7v2zvebrur5YCAe/Y1EQvxXywr0kks+9QesJv6bjQb/ZGMe9rq4jPvfLDb/Hc3O/vYzAva831z5xqhK/KYSSvxZVr726lS4/xYYYvyfOcr8XZpO9OCu6Pu5hHb+znEC/WoGEvbk8ST0bPCG/sU9yvy9+gr04NaQ+vRQmv64kQL9ptmq995UNPIPsKb+D/g2/LwFqvZRlmr6Gwyy/vbk/v6Nagb1cqOC8KJkwv8SODb8zeoK9676tvu9tM79CQz+/g2CQvTARir0zQTe/IvBwv2sjk71zEk8+zRI8v99Nkb8A24q9elvxPrfiQb/7bXC/ABhvvQzwIT63sUa/1xORv4QjYr30G90+T39Mv+YDcL/iwj69a3P6PTBMUb/B5j2/RL40vcAlPb57GFW/Ia9vvwLgQ72Y9L89quNZv+mQPb9iMjy9L8davj6uXb94V2+/9bJNvTJ3gz2sd2K/w4yQv8BwSL1jZa4+3T9ov7P6br99iSy9CNAGPXAHbb+CYpC/P9cpvUnAnz7wzXK/IUipv9dHEL3eYRc/DJnluwOWSLwBWc287wiZuwyf7bvh9lO+4xzOvBtcjz6lozq8ftI8vNs8oLyiB6i8amo+vPpIPD4rmaO8cHujvhoqAryzkDO8oOnXvJsYB716wQW8d6FSvgNR3bxopoA+WChJvBkRJ7wAJrS8egZMvbp/TLzNrT0+OE+8vAbesr47zQ+8m4kcvAeM9bxFFIO9tO4SvKtyPj5EBAC9ImO7vmr6q7sVNg68pf8dveGlqr2pqrG7EhtQvhXTJL2Hq0k+Wm0bvACM97vhsBS9d4HdvRPnHbz7lEA+GY0dvZ/60r6FjcC7JZjUu8xOP7366ga+AM7Eu4u7Tb7qGUq9f1UVPp08JLxstqe7jic+vcTiJb716SW8v1VMvuVsS72J7Ow9FE1nvMCtdbvL8kG9bdtEvoyHaLwb60q+brJRvQRbrj1Pu5S8lrYZuwe5Sr3sm2S+sB2VvKRySb7xAl29KM1aPf1Ytby2Nci+rqJYvV4+qD4tavW83eJHvmy3Pb0QiqE8wrIKvYV9x77hGTy9jUmYPuKdKr2JiUa+MbwjvTCBGbztfzq9wN3Gvq6AJL3gdoo+fFFavV07Fb8uWQ69I9kMP0gJhb3JDUe/yo3CvL/rVD+H4qS9igUVv1Ij6bs/Gwg/dLq8vQgXxr7HlmY7dG5yPlaTzL09bUS+rjkHPI6MYL2/btS9By3GvtSF6juQOXY+Y0nkvf+mRL6oDUQ83aVMvRsn7L2+UMa+d64zPPtifD6bBPy98fxEvv04gjwTCC+94vIBvuWCxr5VcXY8UYWCPqPjCb7hcEW+7fykPGETB72J1g2+tv0DO8GVnzw8c6O++ssNvmAFRr7Wj1Y8E76nvNjBEb4BAse+J9pPPHB4jT6vtxm+AWZGvlYylTwQREq8fK8dviUtkTqJLJM8QjSZvq2pHb6a7Ua+G0xEPNlwdLowpCG+2N4yOu
L9QzwlYpS+nKAhvqxTSD6BDco7hSUUv/CeHb53Bcg+1DOxuxKTXr+4nhW+RVBIPpC/uryNFRS/HZ0RvjHjVzrWwgy9JgmWvsyYEb7ZIUk+TcQkvfiqGL8Akw2+e5/2OtqeVb0iG6K+IokNvksURb62jm+9GDYnvS56Eb5KEWI71eZyvczms74YaBG+I1xDvs3Xh70Aqp+9VlAVvqYQxb5ICYu9mLZFPkgyHb7AZkG+siCDvbg39r2AECG+TvfoO1UNiL1zY92+OesgvhKAP75ew5m9uT8lvrS/JL49EMO+hF+gvRWK2j0ojSy+sywTv5gAnL3uVL8+SlM4vjDURL8fsoy9KDQkP1gSSL6LphK/rNhkvQjlpz7AzVO+4gPBvrX7Sb0wt5Q8OYZbvsKJOb7+fki9wU+Uvi48X74ZTMC+1jlgvdkGUrxO7Wa+beMRv6tGYb05MoY+GZlyvs5+v77+zUu9jC9CvQJCer7ugRG/ObBPvXeraj4B84K+2cK+vjDqPL2a7aG9s8OGvq8nEb9VZEO9GnxLPhaSjL5u7EK/9hwzvUKz8z4aXpS+TNIQvwAfDL0q6i0+EymavqJ/vb51avy8uKMIvk/znb6JlRC/myMJverzGD7Zu6O+Wwe9vkTO+bwQYB2+rYOnvrpZEL8tfgm91FIEPtNJrb6yLEK/YtD9vHt/0j4sDrW+UR0Qv2R0urwt6N496NG6vtgpvL5Bn6i8Y4RDvk6Vvr6/9A+/o+fHvBXwwj1qV8S+ytBBv1BPuLx+mMI+FhjMvtLID78RFHS8j52kPXDY0b4irkG/a71ZvAWVvD65l9m+aa4Pvz8YwruPZJI9lIwFbnVtcHmUjAVkdHlwZZSTlIwCZjSUSwBLAYeUUpQoSwOMATyUTk5OSv////9K/////0sAdJRiS8hLBIaUjAFDlHSUUpQuAAAAAA==", "actions": [0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1], "rewards": [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0], "prev_actions": [1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1], "prev_rewards": [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0], "dones": [false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, true, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false], "new_obs": 
"BCJNGGhAFg0AAAAAAABQFg0AgIAFlQsNAAAAAAAAjCByYXkuY2xvdWRwaWNrbGUuY2xvdWRwaWNrbGVfZmFzdJSMEV9udW1weV9mcm9tYnVmZmVylJOUKJaADAAAAAAAAPHkfr4A+VK+OAchPY5SAD2PjoG+pkU8vDuYIz0Zqn6+r6yBvmJuOz60OA899EwHv7iZf77vvk28wdnHPA5tZr6P23++HgNVvoH7ojy3HZo9Fw+CvrYUWLzNT688eidYvqoxgr7QnVW+ILqMPNXFtD2FVIS+ptXOvlowmzzZy8U+hHeIvhElVr7Te9o8ECDMPbqbir5NKs++T9DqPKAhzT5pwI6+JfRWvlY6Fj2u5e89seaQvmSjz77i0h89j6LXPs0Nlb4xD1i+SlNCPXBqED7qNpe+ENuHvO3gTT01lgu+Y2KXvtgMNj4wtkI97FDTvlaQlb7QSJO8r+YgPSHz1714v5W+xss0PltDGD2QXcW+ovCTvnc6nLyIXvE8p4amvaAilL7Y1DM+FQzkPO2mur5BVpK+qe+ivItRqDx+fIG9ZYqSvlAhMz6q9Z08xt6yvtK/kL68mae8PHFJPDJ8T7109ZC+6e5cvvDXODw/yHs+CyuTvphcqrz4tIQ8kgkxvY9hk76cRDI+NkB7PBVYqb4xmZG+MA6uvOHeDjwfRwi95NCRvjbrMT7p9wM8JXmlvmsJkL5HBbC8OYbQOpcx5by/QZC+ZscxPqwuhzrK66O+onqOvvhTsLz7BbC7TmrevA6zjr6j1zE+DNHBu7KepL7I64y+mAGvvNlDSrydlvu8yCONvkZ/Xb4dVFS8Qh+EPtFaj77eCqy8dIr/u7V6Hr3ekY++x2kyPuJyDLwJ66q+IcmNvsQrvT4b1nm8IxMhv5MAir4hwjI+igHkvEXGrr70Noi+LwqlvIj3Db2Q12u9xGuIvqvoW74LrxK9h1hlPryeir70ypy8C1YAvQhpo73o0Iq+F+5avl3fBr2etk8+XgGNvvxFlbzKguy8BOLMvSMxjb5HB1q+yub8vBnMOz5KX4++oUiOvJ3a3rz2bvO90oyPvj0uWb4hVPK8lBMpPs24kb5ApIe8wkbXvNgIDL415JG+YF1YvpOu7bzmEBc+Gg6UvgYtgbzpgtW8Kt4dvnA3lL5bj1e+LcXuvPBNBT5FX5a+p3F1vAZx2bzSqy++i4aWvtnXOD6EjPW8T+rxvlitlL79Pmi8HHshvRzzQb6A0pS+fbhVvjL/ML1Khbk9oPWWvn30VLx3kym9qpRcvrMXl772e1S+9Tg7vfrtgj2pN5m+IqNAvD38Nb3SpHi+e1aZvr0qU7534Em9uIURPRJzm75hG82+ZPdGvdbgnz43jZ++cbxRvsdiLb0I15g7JKahvp9iFbz1AC29nCWavgu+ob7wf1C+0apFvUw4tLzN06O+FcjLvi54R706mYI+KuenvggVT77dkjK93kpXvUv5qb5VTtW7KeE2vTmkt75cCqq+ScJBPh5DVL2xQyq/VhqovoukqLuXX4W9LkHHvtQnqL7MEUy+U1CVvYQR8b0/Mqq+nl3Jvpgimr29vBo+PTmuvgPpSb4V8pO9PEcoviE+sL7ESsi+Qa2avddz1j2fP7S+P8JHvkJjlr1u3le+AT+2vgM1x77BBZ+9auVsPfI6ur7vj0W+Taecvdw/hL60NLy+jhXGvsU7p73YiRg85SrAvrOrFL8l2qa95uCMPkkdxr4n5cS+9JSbvWJULL1jDcq+nRkUvx5Onb3wH2c+7vnPvvbHw75mD5S9ubC4vVXk0773jhO/A8GXvVAjNz5Uy9m+JDhFv65tkL29/+Q+267hviMIE7+QN3y9bWgIPnaQ574LvcG+7U1xvfGFNr5ncOu+k9w6vgHof70qU/q+xE7tvtPawL6o+pO9ssFdvi8q8b6eGBK/cdmcvTz6Vj01Ave+2MW/vhqzmr0p1Ya+Ftj6vr+KEb95fKW9MGCWOzhVAL9+o76+WkylvQv6n75BPQK/MPQQv60Ysr2JjT29aiMFv+5svb7u/bO95uu6vlgIB79jURC/FfLCvSSSz71B6wm/puNBv9kYx72uriM+98sNv8dzc7+9jMC9rzfXPnGqEr8phJK/FlWvvbqVLj/Fhhi/J85yvxdmk704K7o+7mEdv7OcQL9agYS9uTxJPRs8Ib+xT3K/L36CvTg1pD69FCa/riRAv2m2ar33lQ08g+wpv4P+Db8vAWq9lGWavobDLL+9uT+/o1qBvVyo4LwomTC/xI4NvzN6gr3rvq2+720zv0JDP7+DYJC9MBGKvTNBN78i8HC/ayOTvXMSTz7NEjy/302RvwDbir16W/E+t+JBv/ttcL8AGG+9DPAhPrexRr/XE5G/hCNivfQb3T5Pf0y/5gNwv+LCPr1rc/o9MExRv8HmPb9EvjS9wCU9vnsYVb8hr2+/AuBDvZj0vz2q41m/6ZA9v2IyPL0vx1q+Pq5dv3hXb7/1sk29MneDPax3Yr/DjJC/wHBIvWNlrj7dP2i/s/puv32JLL0I0AY9cAdtv4JikL8/1ym9ScCfPvDNcr8hSKm/10cQvd5hFz9ik3m/zz6Qvzatv7yNVpM+DJ/tu+H2U77jHM68G1yPPqWjOrx+0jy82zygvKIHqLxqaj68+kg8PiuZo7xwe6O+GioCvLOQM7yg6de8mxgHvXrBBbx3oVK+A1HdvGimgD5YKEm8GREnvAAmtLx6Bky9un9MvM2tPT44T7y8Bt6yvjvND7ybiRy8B4z1vEUUg7207hK8q3I+PkQEAL0iY7u+avqruxU2Dryl/x294aWqvamqsbsSG1C+FdMkvYerST5abRu8AIz3u+GwFL13gd29E+cdvPuUQD4ZjR29n/rSvoWNwLslmNS7zE4/vfrqBr4AzsS7i7tNvuoZSr1/VRU+nTwkvGy2p7uOJz69xOIlvvXpJby/VUy+5WxLvYns7D0UTWe8wK11u8vyQb1t20S+jIdovBvrSr5uslG9BFuuPU+7lLyWthm7B7lKveybZL6wHZW8pHJJvvECXb0ozVo9/Vi1vLY1yL6uoli9Xj6oPi1q9bzd4ke+bLc9vRCKoTzCsgq9hX3HvuEZPL2NSZg+4p0qvYmJRr4xvCO9MIEZvO1/Or3A3ca+roAkveB2ij58UVq9XTsVvy5ZDr0j2Qw/SAmFvckNR7/KjcK8v+tUP4fipL2KBRW/UiPpuz8bCD90ury9CBfGvseWZjt0bnI+VpPMvT1tRL6uOQc8joxgvb9u1L0HLca+1IXqO5A5dj5jSeS9/6ZEvqgNRDzdpUy9Gyfsvb5Qxr53rjM8+2J8PpsE/L3x/ES+/TiCPBMIL73i8gG+5YLGvlVxdjxRhYI+o+MJvuFwRb7t/KQ8YRMHvYnWDb62/QM7wZWfPDxzo776yw2+YAVGvtaPVjwTvqe82MERvgECx74n2k88cHiNPq+3Gb4BZka+VjKVPBBESrx8rx2+JS2ROokskzxCNJm+rakdvprtRr4bTEQ82XB0ujCkIb7Y3jI64v1DPCVilL6coCG+rFNIPo
ENyjuFJRS/8J4dvncFyD7UM7G7EpNev7ieFb5FUEg+kL+6vI0VFL8dnRG+MeNXOtbCDL0mCZa+zJgRvtkhST5NxCS9+KoYvwCTDb57n/Y62p5VvSIbor4iiQ2+SxRFvraOb70YNie9LnoRvkoRYjvV5nK9zOazvhhoEb4jXEO+zdeHvQCqn71WUBW+phDFvkgJi72YtkU+SDIdvsBmQb6yIIO9uDf2vYAQIb5O9+g7VQ2IvXNj3b456yC+EoA/vl7Dmb25PyW+tL8kvj0Qw76EX6C9FYraPSiNLL6zLBO/mACcve5Uvz5KUzi+MNREvx+yjL0oNCQ/WBJIvoumEr+s2GS9COWnPsDNU77iA8G+tftJvTC3lDw5hlu+wok5vv5+SL3BT5S+LjxfvhlMwL7WOWC92QZSvE7tZr5t4xG/q0ZhvTkyhj4ZmXK+zn6/vv7NS72ML0K9AkJ6vu6BEb85sE+9d6tqPgHzgr7Zwr6+MOo8vZrtob2zw4a+rycRv1VkQ70afEs+FpKMvm7sQr/2HDO9QrPzPhpelL5M0hC/AB8MvSrqLT4TKZq+on+9vnVq/Ly4owi+T/OdvomVEL+bIwm96vMYPtm7o75bB72+RM75vBBgHb6tg6e+ulkQvy1+Cb3UUgQ+00mtvrIsQr9i0P28e3/SPiwOtb5RHRC/ZHS6vC3o3j3o0bq+2Cm8vkGfqLxjhEO+TpW+vr/0D7+j58e8FfDCPWpXxL7K0EG/UE+4vH6Ywj4WGMy+0sgPvxEUdLyPnaQ9cNjRviKuQb9rvVm8BZW8PrmX2b5prg+/PxjCu49kkj0FV9++vGq7vsA/k7sma2S+lIwFbnVtcHmUjAVkdHlwZZSTlIwCZjSUSwBLAYeUUpQoSwOMATyUTk5OSv////9K/////0sAdJRiS8hLBIaUjAFDlHSUUpQuAAAAAA==", "action_prob": [0.756100594997406, 0.6515371203422546, 0.27392688393592834, 0.9032630920410156, 0.7177965044975281, 0.6907975077629089, 0.7001656293869019, 0.29242339730262756, 0.9091400504112244, 0.2592725455760956, 0.9178235530853271, 0.22246022522449493, 0.9267336130142212, 0.8150317072868347, 0.5043784379959106, 0.8050561547279358, 0.5391262769699097, 0.7899214029312134, 0.5634126663208008, 0.7793945670127869, 0.5781745314598083, 0.7740053534507751, 0.41584810614585876, 0.8650074601173401, 0.6059991121292114, 0.7561507821083069, 0.6087162494659424, 0.7580081820487976, 0.6039019823074341, 0.765137791633606, 0.5913336277008057, 0.7772562503814697, 0.4296134114265442, 0.8560630679130554, 0.5659237504005432, 0.20581138134002686, 0.9263558387756348, 0.8268990516662598, 0.5296928882598877, 0.8078779578208923, 0.5639780163764954, 0.7897661924362183, 0.5955390930175781, 0.7707171440124512, 0.625027060508728, 0.7503579258918762, 0.6530634760856628, 0.7281825542449951, 0.6801934242248535, 0.7035447955131531, 0.2931455671787262, 0.9074767231941223, 0.7535005807876587, 0.6125473976135254, 0.7869656085968018, 0.5590490698814392, 0.8182992935180664, 0.5019524097442627, 0.8211316466331482, 0.4568959176540375, 0.8609375953674316, 0.6036350131034851, 0.7615033388137817, 0.3553500771522522, 0.10620827972888947, 0.9551283717155457, 0.9165684580802917, 0.7830103039741516, 0.5512691736221313, 0.8262907862663269, 0.46774110198020935, 0.8609398603439331, 0.38346102833747864, 0.8886261582374573, 0.6966401934623718, 0.653684675693512, 0.7513241767883301, 0.5858906507492065, 0.7949452996253967, 0.4842629134654999, 0.8081994652748108, 0.4692431092262268, 0.15298905968666077, 0.9438210725784302, 0.877147376537323, 0.3255527913570404, 0.8979164958000183, 0.2635405957698822, 0.9152228832244873, 0.2084365040063858, 0.9296020269393921, 0.8384625315666199, 0.6073265671730042, 0.30342909693717957, 0.8704909682273865, 0.6670771241188049, 0.6860288977622986, 0.6314540505409241, 0.285156786441803, 0.899448573589325, 0.2472555935382843, 0.9114115238189697, 0.7900082468986511, 0.5240451693534851, 0.7550365328788757, 0.5520618557929993, 0.7385621666908264, 0.4295436441898346, 0.8309191465377808, 0.40049228072166443, 0.8433223962783813, 0.6298428773880005, 0.6709883213043213, 0.6448736786842346, 0.3432188332080841, 0.8522629737854004, 0.4863392412662506, 0.8424087762832642, 0.49456992745399475, 0.8485194444656372, 0.5477163195610046, 0.8129344582557678, 0.4291001558303833, 0.8722109794616699, 0.38296541571617126, 0.8875553607940674, 0.6704058051109314, 0.7221771478652954, 0.29367727041244507, 
0.9112568497657776, 0.7575416564941406, 0.6152802109718323, 0.7929198145866394, 0.556701123714447, 0.8237192630767822, 0.4931412637233734, 0.8505368232727051, 0.5740993022918701, 0.78038090467453, 0.6180276274681091, 0.7533022165298462, 0.6537297368049622, 0.2725464999675751, 0.09283673763275146, 0.9558241367340088, 0.9155663251876831, 0.7621207237243652, 0.6239885687828064, 0.7753652930259705, 0.6035820841789246, 0.7908192276954651, 0.5774814486503601, 0.8080897331237793, 0.4550636410713196, 0.850283145904541, 0.5335582494735718, 0.8301497101783752, 0.49839186668395996, 0.8329988718032837, 0.509502649307251, 0.17002059519290924, 0.06413768231868744, 0.9635510444641113, 0.9431396722793579, 0.12658293545246124, 0.9492011666297913, 0.896753191947937, 0.2916020452976227, 0.9133553504943848, 0.7691136002540588, 0.5800385475158691, 0.19020146131515503, 0.936056911945343, 0.852791428565979, 0.598386824131012, 0.25553426146507263, 0.9033126831054688, 0.7208718061447144, 0.32151052355766296, 0.900627851486206, 0.7274596095085144, 0.6292542219161987, 0.7590486407279968, 0.5876310467720032, 0.7853286862373352, 0.45330071449279785, 0.8328307867050171, 0.5316824913024902, 0.8127815127372742, 0.5038561820983887, 0.8262620568275452, 0.5247547626495361, 0.8002399206161499, 0.47011104226112366, 0.8391430377960205, 0.5495445728302002, 0.7885800004005432, 0.5474036931991577, 0.7929500937461853, 0.46395400166511536], "advantages": [10.71619701385498, 9.496904373168945, 10.224000930786133, 11.308004379272461, 9.940113067626953, 8.838640213012695, 9.475197792053223, 8.429200172424316, 7.431064605712891, 7.785325527191162, 6.861108303070068, 7.049731731414795, 6.1984124183654785, 6.217492580413818, 6.367193698883057, 6.959926605224609, 5.7406110763549805, 6.309293270111084, 5.112285614013672, 5.6639580726623535, 4.486131191253662, 5.027531623840332, 3.867297649383545, 3.000225067138672, 3.0318500995635986, 3.514237403869629, 2.37080454826355, 2.852041006088257, 1.7219473123550415, 2.2080397605895996, 1.0927358865737915, 1.5896426439285278, 0.491803914308548, -0.33763387799263, -0.32301193475723267, 0.155025452375412, 1.5050715208053589, -0.1369173675775528, -1.1637365818023682, -1.9489446878433228, -1.8420066833496094, -2.592057943344116, -2.5179710388183594, -3.2329299449920654, -3.190302848815918, -3.8703176975250244, -3.8571112155914307, -4.502234935760498, -4.515966892242432, -5.12598180770874, -5.163969039916992, -4.646975040435791, -5.509710788726807, -6.043408393859863, -6.056879997253418, -6.542786598205566, -6.569504261016846, -7.002768516540527, -7.467788219451904, -7.6845598220825195, -7.778643608093262, -8.108591079711914, -8.441168785095215, -8.750797271728516, -8.899496078491211, -8.093663215637207, -8.989055633544922, -9.177718162536621, -9.253134727478027, -9.658561706542969, -9.641874313354492, -10.104632377624512, -10.016752243041992, -10.50851058959961, -10.380828857421875, -10.469677925109863, -10.940350532531738, -10.937837600708008, -11.504593849182129, -11.446361541748047, -11.814696311950684, -12.157753944396973, -12.842756271362305, -13.141839027404785, -13.263139724731445, -13.28312873840332, -13.844392776489258, -13.972935676574707, -14.40645694732666, -14.686785697937012, -14.926937103271484, -15.401124000549316, -15.889636993408203, -16.464384078979492, -17.194381713867188, -17.680950164794922, -18.257383346557617, -18.896621704101562, -19.4682674407959, -19.60881805419922, -20.5964412689209, -20.547060012817383, -21.720386505126953, -22.682628631591797, -23.39211082458496, 
-24.158687591552734, -24.874181747436523, -25.668039321899414, -25.89198875427246, -27.09056854248047, -27.21060562133789, -28.5135440826416, -29.35718536376953, -30.072494506835938, -30.93113136291504, -31.531635284423828, 6.526088714599609, 6.3266754150390625, 6.083540916442871, 7.01475715637207, 5.698334693908691, 5.386723518371582, 5.2717084884643555, 6.424638748168945, 4.9197516441345215, 6.250074863433838, 4.62456750869751, 4.0756940841674805, 4.311397075653076, 6.0907416343688965, 4.151798725128174, 3.344545602798462, 4.013464450836182, 3.028205394744873, 3.9763708114624023, 2.794191360473633, 4.070122241973877, 2.6745963096618652, 2.4652323722839355, 2.5488569736480713, 2.09835147857666, 2.5073435306549072, 1.8085483312606812, 2.355356216430664, 3.9631409645080566, 1.6893315315246582, 0.9215003848075867, 1.937110185623169, 0.599037766456604, 1.710857629776001, 0.26438188552856445, 1.4385112524032593, -0.09456945210695267, 1.1069295406341553, 3.181992769241333, 0.8805528879165649, -0.7504717111587524, 0.5060690641403198, 2.4393715858459473, 0.234903022646904, 2.11749005317688, 4.167758464813232, 6.837975025177002, 4.385025978088379, 2.4337315559387207, 4.606293678283691, 2.8317625522613525, 1.2482519149780273, 3.125519275665283, 1.7339537143707275, 0.29299408197402954, 2.014551877975464, 3.6011769771575928, 2.557168483734131, 1.4866931438446045, 0.22868794202804565, -1.05729079246521, 0.45399731397628784, 1.6226900815963745, 2.4563777446746826, 1.9006891250610352, 1.168404221534729, 1.9297596216201782, 1.3716800212860107, 1.921997308731079, 1.5300136804580688, 0.7787219882011414, 1.419045329093933, 1.5690706968307495, 1.4618823528289795, 1.4684877395629883, 1.4843612909317017, 1.0726405382156372, 1.2647889852523804, 0.9960322380065918, 1.2049814462661743, 0.9722687005996704, 0.9141455888748169, 0.7524077892303467, 0.5713729858398438]}
+{"type": "SampleBatch", "eps_id": [1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1995249552, 1747731318, 1747731318, 1747731318, 1747731318, 1747731318, 1747731318, 1747731318, 1747731318, 1747731318, 1747731318, 1747731318, 1747731318, 1747731318, 1747731318, 1747731318, 1747731318, 1747731318, 1747731318, 1747731318, 1747731318, 1747731318, 1747731318, 1747731318, 1747731318, 1747731318, 1747731318, 1747731318, 1747731318, 1747731318, 1747731318, 1747731318, 1747731318, 1747731318, 1747731318, 1747731318, 1747731318, 1747731318, 1747731318, 1747731318, 1747731318, 1747731318, 1747731318, 1747731318, 1747731318, 1747731318, 1747731318, 1747731318, 1747731318, 1747731318, 1747731318, 1747731318, 1747731318, 1747731318, 1747731318, 1747731318, 1747731318, 1747731318, 1747731318, 1747731318, 1747731318, 1747731318, 1747731318, 1747731318, 1747731318, 1747731318, 1747731318, 1747731318, 1747731318, 1747731318, 1747731318, 1747731318, 1747731318, 1747731318, 1747731318, 1747731318, 1747731318, 1747731318, 1747731318, 1747731318, 1747731318, 1747731318, 1747731318, 1747731318, 1747731318], "obs": 
"BCJNGGhAFg0AAAAAAABQFg0AgIAFlQsNAAAAAAAAjCByYXkuY2xvdWRwaWNrbGUuY2xvdWRwaWNrbGVfZmFzdJSMEV9udW1weV9mcm9tYnVmZmVylJOUKJaADAAAAAAAAAVX3768aru+wD+TuyZrZL6YFuO+p6QPv+y3Erznqos9gNXovq2PQb9Gvvi7L1G3PpGT8L4QfHO/vo7huVz4JT/ZUPq+KIhBv6NkTTxuBLY+TwcBv3mgD78t8aA8082IPa3mA78UpkG/7OKrPOIxuz4pxge/g8YPv+zJ5zxxEKM9SqYKv9fTQb971fQ85x7DPrCGDr/1/A+/4qIZPYGryD3oZxG/WRJCv76pIT0Q9c0+jUkVv/BEEL/BnUI9mmz6PTYsGL/hYkK/GaJMPcrv2z54Dxy/+p8Qv7TSbz1Uthw+8/MevxfHQr8sXHw9d13tPjbZIr8GEBG/UyuRPdKKQz7uvyW/1Ma+vq39mD2d3aC9Uagnv5yXEb8LxpU9TWtyPsCRKr9UykO/anifPTMzDT8yfC6/0iMSv/gPtj36m5E+b2gxv5MUwb4MtsE9ei+sPLhWM7+z3Tu+cZLCPQIodr4wRzS/vnXCvs25uD1EP6U9ATk2v7SMPr7eB7w9tZo6vuksN7/HyMO+CpG0PX9BDT4eIjm/7yIUv4A3uj2lB+o+kxg8v7gVxb5u8Mw9kQ9HPh0RPr8t90O+0ObUPVRdhb3zCz+/bcAKO/070j3ahKW+LAk/v/f3Rr4p/sQ9b4uRuNoHQL+RkDO6bv3EPYeXhb7ACEC/g5JFPnhNuj2ikwW/3As/vw76xT4r7qQ9K9NIvwoRPb9SohQ/YcyEPat/hr8IGDq/HPXEPn6EMz2sCz2/0h84v+2EQT7CC+489Vfdvh4oN78DcsQ+SjenPMI3N783MTW/fs1APozTxzsWVNW+bjo0v0Li4rvadxK7iPn5vYFDNL+13E6+2zmZu0D/LT5JTDW/Pabfu7R4p7ohb/69PFU1v3DGTr7Vkna7xBMsPuhdNr8pQ8u+loTSuYlI6z5CZji/yLZOvq4AEDzUuio+2m45v8NSy77dokY8raHsPlx3O7/vBE++SgqvPHd+MT5YgDy/chfxu3Fwyzxld+a9/Yk8v8XlPz5+ALk8CmjLvlyUO79OpQO84NJvPFjOx73knju/7ls/PtLaTzwibMW+86k6v5cLCrxHAqM7eiO2vf60Or+KGj8+93JROyWVwr5hwDm/4vULvFhXkLsF27C9lMs5v8t8UL5Z78i7ZdpRPnHWOr9afgm8CUIFuzSot71x4Tq/Sl5QvmjMerueOE8+J+w7v/gOzL4tHWc5S9j8Pov2Pb8z+xe/kG4lPPhdST+wAEG/vyHMvkOX0zz/g/4+QwtDv13XUL6XhBI9xcVZPpUWRL9Jk8y+kvAjPeozBD9LIka/FPJRvqA+Tj0OSnI+Bi9Hv3A5zb61oGE9LnQLP2Y8Sb+eelO+YiCHPcAxij4XS0q/xi1LvJoukj1yVKS6WFtKvzh5Vb51IZI9gkqgPpdsS79Jemy8OPSePUcNMz2Cf0u/qKFXvpe+oD30PLg+hZNMvwBuiLzJe689dkO+PVmpTL+zZDU+8EmzPaiuL74qwUu/x+e9PvNCrD1j7N6+AdtJv2nlMj56bZo90n7wvQX2SL8Zwrw+JJ6VPXpTxb7MEke/SLswPufUhT2pm5C9lTBGv0fCuz6D8II9BhivvutPRL9nlQ8/Ld1pPV4kHb/FcEG/tVFBPxqUNz01VGO/+pI9v0U4Dz+gqt08G+IUv7G1Or84Wbo+MsN8PLWcj76j2Di/massPsjZIDwsZYs8n/s3vzEquj4xbSY8GoyLvgofNr8D/w4/iDuaOzngD7/mQjO/Yw66PvYW1rsJJIm+mGYxv1NnLD6g0EK8bHe6POuJML/FMLo+NVs7vK4bjL5FrS6/eBYPP0ODirwI5hG/qdArv0AZQT9I4+e8Oi1ev//zJ7+2Pw8/Wgo7veuPFb+QFiW/A/C6Pn3mar2jz5y+ADgjv9+dDz+8/oG9U9Edv69YIL+Px0E/8j6bvT2vbb+IeBy/LxwQP4FGwb0aGCm/sJYZv+4MvT6aVNy9xUfMvriyF79c0hA/Q6zsvfA6Ob88zRS/7qW+PqUnBb6UUPC+LOUSvyWYNz54xA6+EZ5hviz6Eb+TrF68oUcTvj1/zzz9CxK/9qo7PtXCEr4RPJ6+xhsRv2d6HLwoFxm+RquEvUooEb8jGk++ymoavpCBNT5hMRK/zjPKvnrJFr70b9Y+BTcUv87PSr6jNQ6+LiyrPZ46Fb/YdaO6cH8Mvgd7f75BPBW/i9RGvn+bEb7pMke7wToWv3Qkxr5vqxG+ZGp3PgA2GL/GbxS/qrgMvoD3+D7/LRu/dNRFvz/DAr53kDs/4iIfv5hGd7/bg+e9VIt7P/AUJL+v/ES/mkS/vUQaKD+CBSi/qbwSvx9fpL35Eqw+zvQqv1QJwb4Lm5a9M1qdPPriLL9KLhK/otGVvS9ckz5szy+/Wve/vrIHir1iUN2827oxv9GrEb/7Iou9GoN5PrGkNL+jW0O/+ieBvYGgAz/tjDi/zzERvxExWL0FJ08+U/8iPQUw+bvWVCo9tQSTPNhfIj3yYT8+NM0rPTfNhb5crzE9aSQQvLFkFj2UXzU93PYwPSpVUb5SBRo9xlayPro3ID1ZhyG8Fo42PeOzij34aB89V4ZSvmcaPD0Ujr8+bJEOPZ97zb5+wFo9nVEuP69h2zzj9VO+WESJPbuhzz7Id7k8LQ9TvKXgmT0z8Qk+eVu3PBJvOT4sZZ89Xo4IvtgG1TzUtHa81u6ZPQQ4Oz5Gj9I85jQ3PvVroT0qi669a9/vPHjivj5L7p09Sz60vlh6Fj2lFxE/6YKPPcOTHr9P6EQ9oNG9Pi1HbD2Db5y+SkdjPa8KMz6TP1M9oiGcOxGacT2jBr0+gKNTPRHcir5J7Ic9TogxPs9rPT3l0xg9NwaPPYRat7xJekA9fj2wPoYbjj2GKzA+Fq1cPeiiiD2BJ5U9VZK7Pj4kYj0rf1W++CikPYiTLj7UD1E9NQTPPaIkqz06zLo+rVdZPcJFM75AFro9VCUPPy0ASz31rua+gf3QPYUMuj5iFyY97BcSvsnf3z0XuSs+ZGcaPRJtJj46vuY9QXq5Psy3Jz1CuvG9z5T1PauIDj+BDB49sYfLvn8xBj5t5bg+1Pf6PJhPvr3Wlg0+SY0pPkO+6zw2SVY+8foQPjh2uD61Awc9gPiXvdRbGD6pDg4/h+8APQJutr4puSM+iP23Pmp+xzxcpFy9ORUrPr7TJz4Kq748w098PoBwLj6MPQG9uAnnPFLcCz8Syy0+3xMnPj5GID0UfIY+giIxPs8aBb28yjU9BUMRPyJ4MD4F3SU+lUZkPaT+kz5byTM+zWe2PnL0ez1j0Gw8MBU7PjrqDD+RI309XDOEviBbRj4/gbU+ofxnPVZfWj28nU0+C3wMP7JabD12R2K+3NpYPoY2Pj+AQFo9N2X9vmsSaD6FFAw/bLUxPQljPr5DR3M+pt09P0
16Ij2P4e2+2zuBPurGCz9i1fg83YYjvizThj4WnT0/U6vePPOh4r7RaI4+EpELP5Ulljya6RC++/2TPlkZsz7u6308YaggPveSlz5scQs/gaqYPIIABr7dJp0+HlM9P8k5gzyX0NW+jLmkPvlQCz9MOPs7SJf1vSZMqj47PT0/faGsOwoC0r713bE+2kQLP9dbQLupOe29EnC3Phw7PT9pF6y7I6LRvssBvz6BTAs/CDZcvOaE8r03lMQ+d0w9P8+BgbyQo9S+oibMPt5nCz8wjcW8ebcCvia60T4QF7M+WHfavOcZIT4XT9U+Y5cLP6SwwLz+GBO+geTaPnOgPT/COdi8Yifjvkh64j4Nxgs/GnUQvRE9I76REeg+dtk9PzqEHb2eD+2+oKnvPivvbz8/ckO97X5Ev41C+T7DKD4/lymBvTME+77kbgA/GXMMP2g+lb2kV1++/j0DPxeRtT5vLZ69U8dPPc4OBT/A/gw/hRmcvQvUh76z4Ac/1DM/P0j3pr3UvBS/qLMLPwKRDT+Tw76991GhvnqIDj/397c+aavLvX3BWb1vXxA/07kpPt7Yzb3ahFM+rzgRPxdsuT7qYsW9nrDtvV0TEz9MlCw+4yPKvaIzFD5E8BM/eCzNvE02xL2tNs8+cM8TPwJoLz6RorO9/dyqPfWvFD/cDLi8vzewvVXEsT6DkhQ/4u8xPhT/ob0c5eo8RXYVP0hnvT5q0qC9DGWTviRbFz/AOzQ+D52svX+1q7zXQRg/veSRvNh4rb0v13k+gCoYPyeyNj56eqO9NeWXvVkUGT+VmH28LoSmvazqRD4QABk/fl9WvsGjnr0qouo+lIwFbnVtcHmUjAVkdHlwZZSTlIwCZjSUSwBLAYeUUpQoSwOMATyUTk5OSv////9K/////0sAdJRiS8hLBIaUjAFDlHSUUpQuAAAAAA==", "actions": [0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1], "rewards": [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0], "prev_actions": [1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0], "prev_rewards": [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0], "dones": [false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, true, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false], "new_obs": 
"BCJNGGhAFg0AAAAAAABQFg0AgIAFlQsNAAAAAAAAjCByYXkuY2xvdWRwaWNrbGUuY2xvdWRwaWNrbGVfZmFzdJSMEV9udW1weV9mcm9tYnVmZmVylJOUKJaADAAAAAAAAJgW476npA+/7LcSvOeqiz2A1ei+rY9Bv0a++LsvUbc+kZPwvhB8c7++juG5XPglP9lQ+r4oiEG/o2RNPG4Etj5PBwG/eaAPvy3xoDzTzYg9reYDvxSmQb/s4qs84jG7PinGB7+Dxg+/7MnnPHEQoz1Kpgq/19NBv3vV9DznHsM+sIYOv/X8D7/iohk9gavIPehnEb9ZEkK/vqkhPRD1zT6NSRW/8EQQv8GdQj2abPo9NiwYv+FiQr8Zokw9yu/bPngPHL/6nxC/tNJvPVS2HD7z8x6/F8dCvyxcfD13Xe0+NtkivwYQEb9TK5E90opDPu6/Jb/Uxr6+rf2YPZ3doL1RqCe/nJcRvwvGlT1Na3I+wJEqv1TKQ79qeJ89MzMNPzJ8Lr/SIxK/+A+2PfqbkT5vaDG/kxTBvgy2wT16L6w8uFYzv7PdO75xksI9Aih2vjBHNL++dcK+zbm4PUQ/pT0BOTa/tIw+vt4HvD21mjq+6Sw3v8fIw74KkbQ9f0ENPh4iOb/vIhS/gDe6PaUH6j6TGDy/uBXFvm7wzD2RD0c+HRE+vy33Q77Q5tQ9VF2FvfMLP79twAo7/TvSPdqEpb4sCT+/9/dGvin+xD1vi5G42gdAv5GQM7pu/cQ9h5eFvsAIQL+DkkU+eE26PaKTBb/cCz+/DvrFPivupD0r00i/ChE9v1KiFD9hzIQ9q3+GvwgYOr8c9cQ+foQzPawLPb/SHzi/7YRBPsIL7jz1V92+Hig3vwNyxD5KN6c8wjc3vzcxNb9+zUA+jNPHOxZU1b5uOjS/QuLiu9p3EruI+fm9gUM0v7XcTr7bOZm7QP8tPklMNb89pt+7tHinuiFv/r08VTW/cMZOvtWSdrvEEyw+6F02vylDy76WhNK5iUjrPkJmOL/Itk6+rgAQPNS6Kj7abjm/w1LLvt2iRjytoew+XHc7v+8ET75KCq88d34xPliAPL9yF/G7cXDLPGV35r39iTy/xeU/Pn4AuTwKaMu+XJQ7v06lA7zg0m88WM7HveSeO7/uWz8+0tpPPCJsxb7zqTq/lwsKvEcCozt6I7a9/rQ6v4oaPz73clE7JZXCvmHAOb/i9Qu8WFeQuwXbsL2Uyzm/y3xQvlnvyLtl2lE+cdY6v1p+CbwJQgW7NKi3vXHhOr9KXlC+aMx6u544Tz4n7Du/+A7Mvi0dZzlL2Pw+i/Y9vzP7F7+QbiU8+F1JP7AAQb+/Icy+Q5fTPP+D/j5DC0O/XddQvpeEEj3FxVk+lRZEv0mTzL6S8CM96jMEP0siRr8U8lG+oD5OPQ5Kcj4GL0e/cDnNvrWgYT0udAs/ZjxJv556U75iIIc9wDGKPhdLSr/GLUu8mi6SPXJUpLpYW0q/OHlVvnUhkj2CSqA+l2xLv0l6bLw49J49Rw0zPYJ/S7+ooVe+l76gPfQ8uD6Fk0y/AG6IvMl7rz12Q749WalMv7NkNT7wSbM9qK4vvirBS7/H570+80KsPWPs3r4B20m/aeUyPnptmj3SfvC9BfZIvxnCvD4knpU9elPFvswSR79IuzA+59SFPambkL2VMEa/R8K7PoPwgj0GGK++609Ev2eVDz8t3Wk9XiQdv8VwQb+1UUE/GpQ3PTVUY7/6kj2/RTgPP6Cq3Twb4hS/sbU6vzhZuj4yw3w8tZyPvqPYOL+Zqyw+yNkgPCxlizyf+ze/MSq6PjFtJjwajIu+Ch82vwP/Dj+IO5o7OeAPv+ZCM79jDro+9hbWuwkkib6YZjG/U2csPqDQQrxsd7o864kwv8Uwuj41Wzu8rhuMvkWtLr94Fg8/Q4OKvAjmEb+p0Cu/QBlBP0jj57w6LV6///Mnv7Y/Dz9aCju9648Vv5AWJb8D8Lo+feZqvaPPnL4AOCO/350PP7z+gb1T0R2/r1ggv4/HQT/yPpu9Pa9tv4h4HL8vHBA/gUbBvRoYKb+wlhm/7gy9PppU3L3FR8y+uLIXv1zSED9DrOy98Do5vzzNFL/upb4+pScFvpRQ8L4s5RK/JZg3PnjEDr4RnmG+LPoRv5OsXryhRxO+PX/PPP0LEr/2qjs+1cISvhE8nr7GGxG/Z3ocvCgXGb5Gq4S9SigRvyMaT77Kahq+kIE1PmExEr/OM8q+eskWvvRv1j4FNxS/zs9KvqM1Dr4uLKs9njoVv9h1o7pwfwy+B3t/vkE8Fb+L1Ea+f5sRvukyR7vBOha/dCTGvm+rEb5kanc+ADYYv8ZvFL+quAy+gPf4Pv8tG7901EW/P8MCvneQOz/iIh+/mEZ3v9uD571Ui3s/8BQkv6/8RL+aRL+9RBooP4IFKL+pvBK/H1+kvfkSrD7O9Cq/VAnBvgublr0zWp08+uIsv0ouEr+i0ZW9L1yTPmzPL79a97++sgeKvWJQ3bzbujG/0asRv/sii70ag3k+saQ0v6NbQ7/6J4G9gaADP+2MOL/PMRG/ETFYvQUnTz5SdDu/EB++vpaeR72/Otq92F8iPfJhPz40zSs9N82FvlyvMT1pJBC8sWQWPZRfNT3c9jA9KlVRvlIFGj3GVrI+ujcgPVmHIbwWjjY947OKPfhoHz1XhlK+Zxo8PRSOvz5skQ49n3vNvn7AWj2dUS4/r2HbPOP1U75YRIk9u6HPPsh3uTwtD1O8peCZPTPxCT55W7c8Em85Pixlnz1ejgi+2AbVPNS0drzW7pk9BDg7PkaP0jzmNDc+9WuhPSqLrr1r3+88eOK+PkvunT1LPrS+WHoWPaUXET/pgo89w5Mev0/oRD2g0b0+LUdsPYNvnL5KR2M9rwozPpM/Uz2iIZw7EZpxPaMGvT6Ao1M9EdyKvknshz1OiDE+z2s9PeXTGD03Bo89hFq3vEl6QD1+PbA+hhuOPYYrMD4WrVw96KKIPYEnlT1Vkrs+PiRiPSt/Vb74KKQ9iJMuPtQPUT01BM89oiSrPTrMuj6tV1k9wkUzvkAWuj1UJQ8/LQBLPfWu5r6B/dA9hQy6PmIXJj3sFxK+yd/fPRe5Kz5kZxo9Em0mPjq+5j1Berk+zLcnPUK68b3PlPU9q4gOP4EMHj2xh8u+fzEGPm3luD7U9/o8mE++vdaWDT5JjSk+Q77rPDZJVj7x+hA+OHa4PrUDBz2A+Je91FsYPqkODj+H7wA9Am62vim5Iz6I/bc+an7HPFykXL05FSs+vtMnPgqrvjzDT3w+gHAuPow9Ab24Cec8UtwLPxLLLT7fEyc+PkYgPRR8hj6CIjE+zxoFvbzKNT0FQxE/IngwPgXdJT6VRmQ9pP6TPlvJMz7NZ7Y+cvR7PWPQbDwwFTs+OuoMP5EjfT1cM4S+IFtGPj+BtT6h/Gc9Vl9aPbydTT4LfAw/slpsPXZHYr7c2lg+hjY+P4BAWj03Zf2+axJoPoUUDD9stTE9CWM+vkNHcz6m3T0/TXoiPY/h7b7bO4E+6sYLP2
LV+DzdhiO+LNOGPhadPT9Tq94886HivtFojj4SkQs/lSWWPJrpEL77/ZM+WRmzPu7rfTxhqCA+95KXPmxxCz+Bqpg8ggAGvt0mnT4eUz0/yTmDPJfQ1b6MuaQ++VALP0w4+ztIl/W9JkyqPjs9PT99oaw7CgLSvvXdsT7aRAs/11tAu6k57b0ScLc+HDs9P2kXrLsjotG+ywG/PoFMCz8INly85oTyvTeUxD53TD0/z4GBvJCj1L6iJsw+3mcLPzCNxbx5twK+JrrRPhAXsz5Yd9q85xkhPhdP1T5jlws/pLDAvP4YE76B5No+c6A9P8I52LxiJ+O+SHriPg3GCz8adRC9ET0jvpER6D522T0/OoQdvZ4P7b6gqe8+K+9vPz9yQ73tfkS/jUL5PsMoPj+XKYG9MwT7vuRuAD8Zcww/aD6VvaRXX77+PQM/F5G1Pm8tnr1Tx089zg4FP8D+DD+FGZy9C9SHvrPgBz/UMz8/SPemvdS8FL+osws/ApENP5PDvr33UaG+eogOP/f3tz5pq8u9fcFZvW9fED/TuSk+3tjNvdqEUz6vOBE/F2y5Pupixb2esO29XRMTP0yULD7jI8q9ojMUPkTwEz94LM28TTbEva02zz5wzxM/AmgvPpGis7393Ko99a8UP9wMuLy/N7C9VcSxPoOSFD/i7zE+FP+hvRzl6jxFdhU/SGe9PmrSoL0MZZO+JFsXP8A7ND4Pnay9f7WrvNdBGD+95JG82HitvS/XeT6AKhg/J7I2Pnp6o7015Ze9WRQZP5WYfbwuhKa9rOpEPhAAGT9+X1a+waOevSqi6j6q7Rc/OaFYvHbei71+nxE+lIwFbnVtcHmUjAVkdHlwZZSTlIwCZjSUSwBLAYeUUpQoSwOMATyUTk5OSv////9K/////0sAdJRiS8hLBIaUjAFDlHSUUpQuAAAAAA==", "action_prob": [0.8257688283920288, 0.5626216530799866, 0.23873643577098846, 0.9067564606666565, 0.7856099605560303, 0.49766361713409424, 0.8052246570587158, 0.4569495916366577, 0.826154887676239, 0.40981125831604004, 0.8473221659660339, 0.357986181974411, 0.8677178621292114, 0.30410391092300415, 0.8865046501159668, 0.7486224174499512, 0.5095136761665344, 0.2117650955915451, 0.9142415523529053, 0.8285714387893677, 0.6418746709823608, 0.6049680709838867, 0.6971065998077393, 0.5416725277900696, 0.2561173737049103, 0.892861008644104, 0.7902103662490845, 0.602611780166626, 0.6124669909477234, 0.6567437052726746, 0.442424476146698, 0.26543065905570984, 0.15198110044002533, 0.9127352237701416, 0.8533020615577698, 0.26722103357315063, 0.859377920627594, 0.7457646131515503, 0.537477433681488, 0.7126670479774475, 0.5382471084594727, 0.2885003089904785, 0.8654332160949707, 0.27987420558929443, 0.8691206574440002, 0.7341107130050659, 0.5083977580070496, 0.7070846557617188, 0.5116556286811829, 0.7083638906478882, 0.5089176893234253, 0.7143175601959229, 0.49994274973869324, 0.7362181544303894, 0.5003710389137268, 0.26436078548431396, 0.1265174001455307, 0.9294705390930176, 0.8794173002243042, 0.23314137756824493, 0.8861583471298218, 0.2124416083097458, 0.8937957286834717, 0.8099349737167358, 0.3407258987426758, 0.8290203809738159, 0.302369087934494, 0.8464890718460083, 0.7347143292427063, 0.5731024742126465, 0.5978360772132874, 0.6071500182151794, 0.5675453543663025, 0.6360226273536682, 0.46020692586898804, 0.29985934495925903, 0.8216624855995178, 0.7090388536453247, 0.5391409397125244, 0.6603631973266602, 0.46193012595176697, 0.7209643125534058, 0.5536842346191406, 0.6486561298370361, 0.43428632616996765, 0.2470548003911972, 0.8723310828208923, 0.7935767769813538, 0.3426437973976135, 0.16905328631401062, 0.9184088706970215, 0.877114474773407, 0.2011764645576477, 0.9117933511734009, 0.8626859188079834, 0.7626271843910217, 0.42835715413093567, 0.8348175883293152, 0.691239058971405, 0.4503331482410431, 0.7731600999832153, 0.44171008467674255, 0.8382903337478638, 0.6743131279945374, 0.4115300178527832, 0.19679485261440277, 0.09697017073631287, 0.9424310326576233, 0.8917253017425537, 0.7317364811897278, 0.5915199518203735, 0.6917995810508728, 0.6348872780799866, 0.3492182791233063, 0.8458874225616455, 0.6246107220649719, 0.633935809135437, 0.7183441519737244, 0.3329625129699707, 0.8886919021606445, 0.28432315587997437, 0.09692191332578659, 0.9518459439277649, 0.9200588464736938, 0.8263546824455261, 0.42920032143592834, 0.8587599992752075, 0.6513775587081909, 
0.3166543245315552, 0.8752435445785522, 0.6515479683876038, 0.7288286685943604, 0.610114574432373, 0.24187780916690826, 0.9096800088882446, 0.7953376770019531, 0.49959492683410645, 0.8199946284294128, 0.5503228902816772, 0.7696138024330139, 0.42422568798065186, 0.849068284034729, 0.6130354404449463, 0.7291445136070251, 0.37080830335617065, 0.8672952651977539, 0.6579036116600037, 0.6932432055473328, 0.3313560485839844, 0.11992057412862778, 0.9412961006164551, 0.10656625032424927, 0.9456089735031128, 0.9074525833129883, 0.7886638641357422, 0.4975608289241791, 0.8132179975509644, 0.5485888123512268, 0.7597054839134216, 0.5700709819793701, 0.7503189444541931, 0.5799199342727661, 0.7484939694404602, 0.4210088849067688, 0.8413461446762085, 0.5897952914237976, 0.7458730936050415, 0.579553484916687, 0.7573979496955872, 0.5591249465942383, 0.7746632099151611, 0.5280197858810425, 0.7963343262672424, 0.5141072273254395, 0.7851057052612305, 0.45569196343421936, 0.8341508507728577, 0.40394654870033264, 0.14309191703796387, 0.9374598264694214, 0.8845937848091125, 0.7365036010742188, 0.5504610538482666, 0.2136368602514267, 0.9179798364639282, 0.8351991176605225, 0.6383849382400513, 0.6346977949142456, 0.7075672149658203, 0.4456018805503845, 0.7734357118606567, 0.5113584995269775, 0.7308218479156494, 0.4294901192188263, 0.826607346534729, 0.6374543905258179, 0.6162071228027344, 0.6886051893234253, 0.44197309017181396, 0.763580858707428], "advantages": [23.95086097717285, 22.991289138793945, 21.537565231323242, 19.930086135864258, 21.27425765991211, 22.73177146911621, 21.32071304321289, 22.721357345581055, 21.31170654296875, 22.64296531677246, 21.235586166381836, 22.485576629638672, 21.085309982299805, 22.241727828979492, 20.85965919494629, 21.908767700195312, 22.75075912475586, 21.792234420776367, 20.482646942138672, 21.293676376342773, 21.96241569519043, 22.550926208496094, 21.83930206298828, 22.339046478271484, 21.627050399780273, 20.795997619628906, 21.122188568115234, 21.445466995239258, 22.096115112304688, 21.277395248413086, 21.846481323242188, 23.073759078979492, 24.91632652282715, 27.05020523071289, 25.446693420410156, 23.597171783447266, 25.451757431030273, 23.49586296081543, 21.63089942932129, 20.332618713378906, 20.778114318847656, 19.474611282348633, 18.725296020507812, 18.40410614013672, 17.706119537353516, 17.378328323364258, 17.69611167907715, 18.8607120513916, 17.083532333374023, 18.235816955566406, 16.39839744567871, 17.552953720092773, 15.65087890625, 14.328669548034668, 14.592450141906738, 13.27332592010498, 12.489974975585938, 11.317055702209473, 11.115880012512207, 10.679228782653809, 10.003006935119629, 9.593965530395508, 8.885342597961426, 8.538631439208984, 8.45246696472168, 7.66885232925415, 7.508100509643555, 6.728886604309082, 6.521304607391357, 6.7088446617126465, 7.3170695304870605, 5.9806108474731445, 6.441709041595459, 5.097110748291016, 5.425076961517334, 6.047134876251221, 6.868538856506348, 5.49683141708374, 3.9302456378936768, 2.3060507774353027, 2.482945680618286, 2.9832944869995117, 1.3077181577682495, -0.47211751341819763, -0.2787628769874573, 0.25314730405807495, 1.0077744722366333, -0.6019788980484009, -2.5261166095733643, -1.8333390951156616, -0.9061185717582703, -2.638707160949707, -4.73069953918457, -3.705681800842285, -5.840037822723389, -8.381463050842285, -11.129940032958984, -10.104172706604004, -12.916311264038086, -15.562175750732422, -17.688013076782227, -17.878828048706055, -16.883323669433594, -19.563915252685547, -21.731664657592773, 
-23.403392791748047, -25.000246047973633, -27.16853904724121, -27.425058364868164, -28.188426971435547, -28.61065673828125, -30.048154830932617, -30.426204681396484, -31.88494110107422, -33.06418228149414, -33.91288757324219, 6.634699821472168, 6.084136486053467, 6.241830825805664, 7.4102091789245605, 5.696282863616943, 6.8732075691223145, 9.031584739685059, 6.211906433105469, 4.569305896759033, 3.941854238510132, 4.253909587860107, 3.5661749839782715, 3.669088840484619, 4.778134346008301, 3.494128704071045, 3.2404539585113525, 3.251777172088623, 3.1045773029327393, 4.017563343048096, 2.887772560119629, 2.7893154621124268, 2.906893014907837, 2.7356178760528564, 3.635089635848999, 2.930985927581787, 3.4188101291656494, 3.0205795764923096, 3.7671284675598145, 3.3917672634124756, 4.333363056182861, 3.613539934158325, 4.097507953643799, 4.1393208503723145, 5.5793843269348145, 8.023341178894043, 5.890435218811035, 8.471368789672852, 6.36739444732666, 5.106582164764404, 4.97397518157959, 6.078526496887207, 5.716768264770508, 6.614887237548828, 6.722918510437012, 7.381338119506836, 7.771437644958496, 8.224390029907227, 8.801892280578613, 10.495489120483398, 9.615983009338379, 9.84536075592041, 10.4797945022583, 10.697399139404297, 11.139212608337402, 11.398507118225098, 11.568729400634766, 11.90092658996582, 11.765414237976074, 12.209936141967773, 11.789682388305664, 12.265124320983887, 11.655421257019043, 12.231867790222168, 14.454611778259277, 12.044990539550781, 10.694457054138184, 10.092092514038086, 9.982656478881836, 11.03602123260498, 9.159738540649414, 8.151397705078125, 7.595863342285156, 7.089807510375977, 6.472036838531494, 5.983863353729248, 5.3483476638793945, 4.860368728637695, 4.220131874084473, 3.800036907196045, 3.0389490127563477, 2.4920849800109863, 1.820901870727539, 1.2539622783660889, 0.7226181030273438]}
+{"type": "SampleBatch", "eps_id": [1747731318, 1747731318, 1747731318, 1747731318, 1747731318, 1747731318, 1747731318, 1747731318, 1747731318, 1747731318, 1747731318, 1747731318, 1747731318, 1747731318, 1747731318, 1747731318, 1747731318, 1747731318, 1747731318, 1747731318, 1747731318, 1747731318, 1747731318, 1747731318, 1747731318, 1747731318, 1747731318, 1747731318, 1747731318, 1747731318, 1747731318, 1747731318, 1747731318, 1747731318, 1747731318, 1747731318, 1747731318, 1747731318, 1747731318, 1747731318, 1747731318, 1747731318, 1747731318, 1747731318, 1747731318, 1747731318, 1747731318, 1747731318, 1747731318, 1747731318, 1747731318, 1747731318, 1747731318, 1747731318, 1747731318, 1747731318, 1747731318, 1747731318, 1747731318, 1747731318, 1747731318, 1747731318, 1747731318, 1747731318, 1747731318, 1747731318, 1747731318, 1747731318, 1747731318, 1747731318, 1747731318, 1747731318, 1747731318, 1747731318, 1747731318, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008], "obs": 
"BCJNGGhAFg0AAAAAAABQFg0AgIAFlQsNAAAAAAAAjCByYXkuY2xvdWRwaWNrbGUuY2xvdWRwaWNrbGVfZmFzdJSMEV9udW1weV9mcm9tYnVmZmVylJOUKJaADAAAAAAAAKrtFz85oVi8dt6LvX6fET5V3Bc/EjI7PkgLhr1+Ui++8ssYP3TywT6VDo29PrX3vnO8Gj9/Jj0+pN+gvd65Wr6Qrhs/SAHDPmWfqb16rQe/xqEdP7i5Ez/AVL+9+PpYvyGWID+bQsQ+RAzivdLuFb+OjCI/G3ZCPoQJ+r1mzqi+d4UjP9Z6V7tWxQO+hHyfvSiBIz+4EEm+n10FvgG4Lz7LfyI/ll7HvvHZAb5DT9c+aIEgP4lZRb5XevK9Gca5PcyEHz+SncW+LcPuvfAIsD7nih0/SvFBvvut4L0ZWIk8qJIcP978w74u/t+9KbyLPu2cGj9wgRO/adDUvdx5Bz+zqRc/53HCvlAjv73rmlI+67cVP3HNEr+3tra9+13vPkrIEj9qZ0S/epCjvYIVOz+02g4/3jESv4Shhb3uAtQ+L+4LP9MLwL4KV2m9ygHQPYwCCj85gDe+DQVhvXGJVb6qFwk/FICIPEkacr1cnQS/gS0JP2XXNb4KRY69eGh6vsBECD8fSL6+OEmYvVZ50DyhXQY/csAzvl8+l72AWJS+jHcFP5gzvb5+HKO9zOmtvDCTAz/PhjG+GvujvYAGrb70rwI/kMS6PKnSsb1Snie/1s0CPz0eL75QpMy9ywDIvrDtAT8Iq7q+YKTcvcsxBr7RDwA/KCIsvocC4r1tT+m++Wb+PsQWub65rPS9W4RMvlGz+j7QAg6/+tr8vbUjWT0gBfU+hVW3vhmv+r0JR42+dFrxPigfDb87/gK+9nHGvF616z4miLW+PH0Dvndwtb7sE+g+ETIMvy2/Cr6Rlta9UnjiPv+WPb+F5Ay+MrARPuzi2j6X+W6/mfoJvhX0xj7RU9E+SjCQv1AFAr42zCI/1srFPjrpqL9u/um9RdZiP4lHuD46w4+/MLPFvZJNDz9Hx6w+joyov3/Frr3ZD1I/ZUufPtxcwb9dKY29lOiKP1PTjz6iR6i/9GtBvTuQRT/0XII+GCvBv48zAr3kS4Y/vNFlPgAkqL+5ATG82Sc/P7LqSj7RGMG/UliHO4OahD9yBSw+3iCov56RyzztnD4/6B4RPqIyj7/TxyI9wG3rPgxr9D0JPqi//HJIPWvHQz+ilL49t0zBv5mMgz16RYk/h7mAPZ9wqL/fea89zvJMP9ulFT0/jcG/j0TQPWY5jz+uMs47rcOov30Z/j1vHFw/CHikvM0HkL+eqBA+RcsbP/hpLr0brm6/SB8dPo8duj6kynq92Fw9vxqRJD6oofs9nrGbvUYVDL9HFSc++sDrvWsbsr1ehj6/wLkkPpeoZT5Rl9C9nz4Nv5pRKT6WwV+8szDnvR+xP78ACik+1Q+nPjPuAr5aInK/trgvPqeIKj8bTRa+CONAvz5dPT41M90+brslvle2D79VNkY+Ur1PPqg6Mb6WK72+9V1KPq3bvLzCyzi+tuo1vhflST4HZn2+LG88vlL7v76w00Q+zovNPREdRL5pbzu+4+FGPjMUAb683Ee+0OUQPAFNRD68FbS+Xq5HvnvvQL7vGD0+4O+ruzOKS75IcsW+bP08PobYrT4McFO+ATUVv5vxQz62LC8/z19fvjEZyL4u9VE+GTLqPjcoPr27GZO8WCSQPPfrGz3KoD+9jCc1PvtgljxG/X6+vyIxvZRPl7w8KVs81FpKPRqmMr2a6Vq+dllrPBxLsT5uKUS9Co+avJtorjxdNm49GrVFvTpuW77n77c8tQe3PgpDV722rZ+8uoHyPJFgkz3R21i94GUzPgNM/jxkTli+v4FKvQdevT4csNs8Xun8vkM1LL2ajTI+l8GKPCCWRb5/7B29iQ29PudIVjz+4vW+x1n/vEUdMj5Rq2M74d87vjfa4rwe7q28lfZMueTn4T29VOa8BREyPgDFAztL0Tq+ItfJvMHrvD7Atta69e/yvrxijbyJDzI+slE2vE6yOr68ymG8gKKsvOAPcrw3yto9hLJovGlwMj49Dk+8NwxDvsiYL7w0bam8RryGvJcZyT22Xza8K90yPn5LbbyTbEy+W0b6u11wvT72Wpe8B2P+vk9gebk7WDM+VMLovEIWV76j+VU750KgvCWWBb10nJY9VVU8Oy48ND7GH/+8z8BqvmGE0TtKzZi8olcSvQ/sWj3/SsU7xFxavsD2Db3tVqs+BirmOiWQkLxdGeW8Fur/PGvntzoSdFm+FfvfvKhDoT5rYzq7NhaKvFBgrLyQ8mE8dHtQu7LYNj7jHaq8USeSvm59zDkAMYW8yuLYvNK8HjoWBYg4EQ5YvmTJ2LzM05E+XSaIuwP8fbw3H6q8E9KDvCpPkru7Ojg+I8KsvOdoob4hOGO66C50vNNo4LxI/O+8z62YujzvOD6NNeW8djapvm9gIDucOGe8u60NvWaLP70H4Q07NbtVvnCCEb2WcXA+cLIDuyTPVrxNjPy8NgmNvbvhFLuow1S+XOoDvX4RWz4qnNK7nArOvq7H5Lw6Gf4+6SttvOniU77td5O845xHPtJ8mLxvKT28kg9nvOq0070TYZq87GpTvop3hLyLQz0+vjS8vF8TNryVXky8WT7nvdsGvrwbmzw+UV5xvICq0b6P2Z+8MbYvvADHu7zf1fi9YpuhvMAmPT4mr8+8NbjXvr5Xg7xsbCS8clsKvWkGDL6r/IS81ZFRvimPFb2wkxQ+pISmvEr9E7xOrAm9h7Eivn7/p7ydi1C+RLAWveft+z2DXcm8i4kDvIOcDL09ZDm+P67KvLQRQD5UcRu9zP33vhjzq7zIVuW7Dx9DvSvEUL6mGK28/T1OvpfSU70HWJY9UxjOvAmgyr4Sz029jNyyPrB3B717wEy+5zAxvQjwKD0C2Re9QuGKu/HPLb2Sj4e+5DEYvct/S76DgEO9tzppPI55KL33x0G7+1VCvWoTlr6Qtyi9uRtKvhZZWr2xKYG8vuI4vZWLyL6+o1u9de6EPhP5WL2OBBa/315GvTrxCD9EfYS9XM3Hvo+MGr3Y2Gg+N3mUvY+1Fb/Z6we9FwgCP05trL3sSMe+Up+8vBbnUT6oXry9f29GvrUJm7zvTMK9o07EvY/6xr77lKq8u2FEPrk51L2g2kW+LymLvCH0272/I9y95rPGvtbBnLx4MTg+LgnsvVpTRb6Pkn68nUPzvcvt8739CC07VL+SvJ4o1b4c0vO9LNZEvkn11ryXcgS+t7H7vZQfxr5XJuy8b6oePqTFBb5+5xS/ZMPSvLl14D4zrxG+fbnFvqTvirzEBQ0+5pcZvhLAFL+3vmi81JnZPk5+Jb4gf8W+9/W6u4/xAj6sZC2+5hpDvlZQTrvzmSq+nEsxvoxuxb5/V9S7WBYAPk8xOb7e9kK+rV2Cu9C0Lb6HFz2+kcGcO7eJ8bsZWe2+cv48vn7MQr77VYi8kWAxvtHjQL6KLcW+WrekvDnQ6T3rxki+CUNCvt
oCkrz2OT2+iqlMvojmxL6RSbC8m1bRPc2JVL4kUhS/UoqfvDesxj5pZ2C+vpnEvhnuP7wN1bY9mkRovkA1FL9LrSK8XqjBPuYfdL4uccS+rPEau6rWqD14+3u+cwtBvqiMO7oJDVi+29d/vmhrxL5dt6G7tdmmPZnZg75Q9kC+9aVYu1LfWb6Ux4W+91vEvi/D97vRh6E98LSJvlYaFL+aEsS7jwK9PoOhj76TQsS+xXC3Og/GmD1djpO+HRYUv9N+PTviRbw+xXqZvppKxL5a3ic8IoybPchnnb5XIhS/k8FAPPVivj6tVKO+rSBGv0FNnTxUECs/gEGrvgE/FL8sZAU9mmbDPosvsb6H1cS+yqckPZmTyz1VH7W+5nlCvmnMLD3SqDi+MRG3vqtvxb6WBh49R10APhAEu76Vp0O+fUsoPb+gHr7x+Ly+HohhO8qaGz2CY96+6++8vlzPRL5+C/A8Iw8FvsHnvr6NYCM7ZMHaPMeT07444b6+DKFFvvwMlzym5uW9JtvAvj0+8zqcqIQ8TFLMvkjWwL7KIka+MY0GPIGBz72D0cK+uAfHvoGzyjuOmEY+i8zGvrtYRr675iQ8ljbGvVDIyL5/kKc66i8FPMLFxb72xMi+7JxGvuab0znvcrq9acHKvvnNlzqfwLm6k2bEvmC+yr4omUa+XuoUvJoau73Jusy+YCLHviTaMrxuNEs+WrbQvlt6Fb82p+O7ya75PgKx1r7uAMe+rOE3O6VsRT7oq9q+mnYVv9lK2juzBPk+aabgvpUSx75MQoY8qHxIPqmh5L6PlUa+PVamPM7Cu70Knua+K1fHvuVQlzy2UFQ+lIwFbnVtcHmUjAVkdHlwZZSTlIwCZjSUSwBLAYeUUpQoSwOMATyUTk5OSv////9K/////0sAdJRiS8hLBIaUjAFDlHSUUpQuAAAAAA==", "actions": [1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1], "rewards": [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0], "prev_actions": [1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0], "prev_rewards": [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0], "dones": [false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, true, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false], "new_obs": 
"BCJNGGhAFg0AAAAAAABQFg0AgIAFlQsNAAAAAAAAjCByYXkuY2xvdWRwaWNrbGUuY2xvdWRwaWNrbGVfZmFzdJSMEV9udW1weV9mcm9tYnVmZmVylJOUKJaADAAAAAAAAFXcFz8SMjs+SAuGvX5SL77yyxg/dPLBPpUOjb0+tfe+c7waP38mPT6k36C93rlavpCuGz9IAcM+ZZ+pvXqtB7/GoR0/uLkTP8BUv734+li/IZYgP5tCxD5EDOK90u4Vv46MIj8bdkI+hAn6vWbOqL53hSM/1npXu1bFA76EfJ+9KIEjP7gQSb6fXQW+AbgvPst/Ij+WXse+8dkBvkNP1z5ogSA/iVlFvld68r0Zxrk9zIQfP5Kdxb4tw+698AiwPueKHT9K8UG++63gvRlYiTyokhw/3vzDvi7+370pvIs+7ZwaP3CBE79p0NS93HkHP7OpFz/nccK+UCO/veuaUj7rtxU/cc0Sv7e2tr37Xe8+SsgSP2pnRL96kKO9ghU7P7TaDj/eMRK/hKGFve4C1D4v7gs/0wvAvgpXab3KAdA9jAIKPzmAN74NBWG9cYlVvqoXCT8UgIg8SRpyvVydBL+BLQk/Zdc1vgpFjr14aHq+wEQIPx9Ivr44SZi9VnnQPKFdBj9ywDO+Xz6XvYBYlL6MdwU/mDO9vn4co73M6a28MJMDP8+GMb4a+6O9gAatvvSvAj+QxLo8qdKxvVKeJ7/WzQI/PR4vvlCkzL3LAMi+sO0BPwirur5gpNy9yzEGvtEPAD8oIiy+hwLivW1P6b75Zv4+xBa5vrms9L1bhEy+UbP6PtACDr/62vy9tSNZPSAF9T6FVbe+Ga/6vQlHjb50WvE+KB8Nvzv+Ar72cca8XrXrPiaItb48fQO+d3C1vuwT6D4RMgy/Lb8KvpGW1r1SeOI+/5Y9v4XkDL4ysBE+7OLaPpf5br+Z+gm+FfTGPtFT0T5KMJC/UAUCvjbMIj/WysU+Oumov27+6b1F1mI/iUe4PjrDj78ws8W9kk0PP0fHrD6OjKi/f8WuvdkPUj9lS58+3FzBv10pjb2U6Io/U9OPPqJHqL/0a0G9O5BFP/Rcgj4YK8G/jzMCveRLhj+80WU+ACSov7kBMbzZJz8/supKPtEYwb9SWIc7g5qEP3IFLD7eIKi/npHLPO2cPj/oHhE+ojKPv9PHIj3Abes+DGv0PQk+qL/8ckg9a8dDP6KUvj23TMG/mYyDPXpFiT+HuYA9n3Cov995rz3O8kw/26UVPT+Nwb+PRNA9ZjmPP64yzjutw6i/fRn+PW8cXD8IeKS8zQeQv56oED5Fyxs/+GkuvRuubr9IHx0+jx26PqTKer3YXD2/GpEkPqih+z2esZu9RhUMv0cVJz76wOu9axuyvV6GPr/AuSQ+l6hlPlGX0L2fPg2/mlEpPpbBX7yzMOe9H7E/vwAKKT7VD6c+M+4Cvloicr+2uC8+p4gqPxtNFr4I40C/Pl09PjUz3T5uuyW+V7YPv1U2Rj5SvU8+qDoxvpYrvb71XUo+rdu8vMLLOL626jW+F+VJPgdmfb4sbzy+Uvu/vrDTRD7Oi809ER1EvmlvO77j4UY+MxQBvrzcR77Q5RA8AU1EPrwVtL5erke+e+9Avu8YPT7g76u7M4pLvkhyxb5s/Tw+htitPgxwU74BNRW/m/FDPrYsLz/PX1++MRnIvi71UT4ZMuo+0WBnvk7fS75YU1s+e15xPsqgP72MJzU++2CWPEb9fr6/IjG9lE+XvDwpWzzUWko9GqYyvZrpWr52WWs8HEuxPm4pRL0Kj5q8m2iuPF02bj0atUW9Om5bvufvtzy1B7c+CkNXvbatn7y6gfI8kWCTPdHbWL3gZTM+A0z+PGROWL6/gUq9B169Phyw2zxe6fy+QzUsvZqNMj6XwYo8IJZFvn/sHb2JDb0+50hWPP7i9b7HWf+8RR0yPlGrYzvh3zu+N9rivB7urbyV9ky55OfhPb1U5rwFETI+AMUDO0vROr4i18m8weu8PsC21rr17/K+vGKNvIkPMj6yUTa8TrI6vrzKYbyAoqy84A9yvDfK2j2Esmi8aXAyPj0OT7w3DEO+yJgvvDRtqbxGvIa8lxnJPbZfNrwr3TI+fkttvJNsTL5bRvq7XXC9PvZal7wHY/6+T2B5uTtYMz5Uwui8QhZXvqP5VTvnQqC8JZYFvXSclj1VVTw7Ljw0PsYf/7zPwGq+YYTRO0rNmLyiVxK9D+xaPf9KxTvEXFq+wPYNve1Wqz4GKuY6JZCQvF0Z5bwW6v88a+e3OhJ0Wb4V+9+8qEOhPmtjOrs2Foq8UGCsvJDyYTx0e1C7stg2PuMdqrxRJ5K+bn3MOQAxhbzK4ti80rweOhYFiDgRDli+ZMnYvMzTkT5dJoi7A/x9vDcfqrwT0oO8Kk+Su7s6OD4jwqy852ihviE4Y7roLnS802jgvEj877zPrZi6PO84Po015bx2Nqm+b2AgO5w4Z7y7rQ29Zos/vQfhDTs1u1W+cIIRvZZxcD5wsgO7JM9WvE2M/Lw2CY29u+EUu6jDVL5c6gO9fhFbPiqc0rucCs6+rsfkvDoZ/j7pK2286eJTvu13k7zjnEc+0nyYvG8pPbySD2e86rTTvRNhmrzsalO+ineEvItDPT6+NLy8XxM2vJVeTLxZPue92wa+vBubPD5RXnG8gKrRvo/Zn7wxti+8AMe7vN/V+L1im6G8wCY9Piavz7w1uNe+vleDvGxsJLxyWwq9aQYMvqv8hLzVkVG+KY8VvbCTFD6khKa8Sv0TvE6sCb2HsSK+fv+nvJ2LUL5EsBa95+37PYNdybyLiQO8g5wMvT1kOb4/rsq8tBFAPlRxG73M/fe+GPOrvMhW5bsPH0O9K8RQvqYYrbz9PU6+l9JTvQdYlj1TGM68CaDKvhLPTb2M3LI+sHcHvXvATL7nMDG9CPAoPQLZF71C4Yq78c8tvZKPh77kMRi9y39LvoOAQ723Omk8jnkovffHQbv7VUK9ahOWvpC3KL25G0q+FllavbEpgby+4ji9lYvIvr6jW7117oQ+E/lYvY4EFr/fXka9OvEIP0R9hL1czce+j4wavdjYaD43eZS9j7UVv9nrB70XCAI/Tm2svexIx75Sn7y8FudRPqhevL1/b0a+tQmbvO9Mwr2jTsS9j/rGvvuUqry7YUQ+uTnUvaDaRb4vKYu8IfTbvb8j3L3ms8a+1sGcvHgxOD4uCey9WlNFvo+SfrydQ/O9y+3zvf0ILTtUv5K8nijVvhzS870s1kS+SfXWvJdyBL63sfu9lB/Gvlcm7Lxvqh4+pMUFvn7nFL9kw9K8uXXgPjOvEb59ucW+pO+KvMQFDT7mlxm+EsAUv7e+aLzUmdk+Tn4lviB/xb739bq7j/ECPqxkLb7mGkO+VlBOu/OZKr6cSzG+jG7Fvn9X1LtYFgA+TzE5vt72Qr6tXYK70LQtvocXPb6RwZw7t4nxuxlZ7b5y/jy+fsxCvvtViLyRYDG+0eNAvootxb5at6S8OdDpPevGSL4JQ0K+2gKSvPY5Pb6KqUy+iObEvp
FJsLybVtE9zYlUviRSFL9Sip+8N6zGPmlnYL6+mcS+Ge4/vA3Vtj2aRGi+QDUUv0utIrxeqME+5h90vi5xxL6s8Rq7qtaoPXj7e75zC0G+qIw7ugkNWL7b13++aGvEvl23obu12aY9mdmDvlD2QL71pVi7Ut9ZvpTHhb73W8S+L8P3u9GHoT3wtIm+VhoUv5oSxLuPAr0+g6GPvpNCxL7FcLc6D8aYPV2Ok74dFhS/0349O+JFvD7Fepm+mkrEvlreJzwijJs9yGedvlciFL+TwUA89WK+Pq1Uo76tIEa/QU2dPFQQKz+AQau+AT8UvyxkBT2aZsM+iy+xvofVxL7KpyQ9mZPLPVUftb7meUK+acwsPdKoOL4xEbe+q2/FvpYGHj1HXQA+EAS7vpWnQ759Syg9v6AevvH4vL4eiGE7ypobPYJj3r7r77y+XM9Evn4L8DwjDwW+wee+vo1gIztkwdo8x5PTvjjhvr4MoUW+/AyXPKbm5b0m28C+PT7zOpyohDxMUsy+SNbAvsoiRr4xjQY8gYHPvYPRwr64B8e+gbPKO46YRj6LzMa+u1hGvrvmJDyWNsa9UMjIvn+QpzrqLwU8wsXFvvbEyL7snEa+5pvTOe9yur1pwcq++c2XOp/AubqTZsS+YL7KviiZRr5e6hS8mhq7vcm6zL5gIse+JNoyvG40Sz5attC+W3oVvzan47vJrvk+ArHWvu4Ax76s4Tc7pWxFPuir2r6adhW/2UraO7ME+T5ppuC+lRLHvkxChjyofEg+qaHkvo+VRr49VqY8zsK7vQqe5r4rV8e+5VCXPLZQVD6pmuq+ySZHvlRJuTwWvKK9lIwFbnVtcHmUjAVkdHlwZZSTlIwCZjSUSwBLAYeUUpQoSwOMATyUTk5OSv////9K/////0sAdJRiS8hLBIaUjAFDlHSUUpQuAAAAAA==", "action_prob": [0.5154235363006592, 0.2407984882593155, 0.8930491209030151, 0.20471283793449402, 0.0959024727344513, 0.942366361618042, 0.9147151112556458, 0.8594895005226135, 0.7571074962615967, 0.6010606288909912, 0.579083263874054, 0.661003828048706, 0.5139110088348389, 0.7095549702644348, 0.5481089949607849, 0.6325888633728027, 0.593794584274292, 0.40812650322914124, 0.7593300938606262, 0.5699806213378906, 0.3511321544647217, 0.19731508195400238, 0.8849126100540161, 0.8208212852478027, 0.2828945517539978, 0.8361754417419434, 0.2529817819595337, 0.14979971945285797, 0.9049645662307739, 0.8643420934677124, 0.19566157460212708, 0.8765650987625122, 0.8275020718574524, 0.24515573680400848, 0.8461055159568787, 0.21365191042423248, 0.8620225787162781, 0.8132255673408508, 0.7391813397407532, 0.6320911645889282, 0.49179768562316895, 0.6634432077407837, 0.5171874165534973, 0.35053154826164246, 0.7950902581214905, 0.3392367362976074, 0.8140704035758972, 0.3081779181957245, 0.8440852761268616, 0.7421359419822693, 0.42034468054771423, 0.2058154195547104, 0.9088913202285767, 0.1409517526626587, 0.9379616975784302, 0.9134494066238403, 0.8646001815795898, 0.7633389234542847, 0.5767053961753845, 0.6558250188827515, 0.7236817479133606, 0.5068705081939697, 0.16148389875888824, 0.944336473941803, 0.9127411842346191, 0.8364307880401611, 0.6637890934944153, 0.5920984745025635, 0.8096596002578735, 0.6075601577758789, 0.6472212672233582, 0.239411860704422, 0.07584802806377411, 0.9601652026176453, 0.944404125213623, 0.6352842450141907, 0.7237421274185181, 0.35462144017219543, 0.8774682879447937, 0.32610297203063965, 0.8871390223503113, 0.7081003189086914, 0.35540705919265747, 0.8692069053649902, 0.35613197088241577, 0.8721439242362976, 0.6560870409011841, 0.7107724547386169, 0.34210875630378723, 0.8800112009048462, 0.6835890412330627, 0.6796474456787109, 0.6989685297012329, 0.6628954410552979, 0.28434285521507263, 0.9001704454421997, 0.7520711421966553, 0.5877953767776489, 0.7782660722732544, 0.4555862843990326, 0.8298412561416626, 0.4791680872440338, 0.8212074637413025, 0.5041744112968445, 0.8197965025901794, 0.5323110818862915, 0.7942827343940735, 0.4500851035118103, 0.8427754640579224, 0.41224443912506104, 0.8581467866897583, 0.6321430802345276, 0.7220175266265869, 0.6597849130630493, 0.3012312650680542, 0.8910598754882812, 0.6973886489868164, 0.680000901222229, 0.6864230632781982, 0.309643417596817, 0.8883459568023682, 0.2807754874229431, 0.8978433609008789, 0.7537546753883362, 0.5817849040031433, 0.776976466178894, 0.5429124236106873, 0.20139668881893158, 0.9196030497550964, 
0.8297119736671448, 0.5751257538795471, 0.7580527067184448, 0.389935702085495, 0.8625457882881165, 0.34107786417007446, 0.8785499930381775, 0.7080844640731812, 0.36989837884902954, 0.8616166114807129, 0.3872119188308716, 0.8587493300437927, 0.6092347502708435, 0.7477098703384399, 0.5950357913970947, 0.7570720314979553, 0.5818967819213867, 0.23456957936286926, 0.9069869518280029, 0.7869290709495544, 0.47435855865478516, 0.8227680921554565, 0.4748462438583374, 0.8261275887489319, 0.5362269282341003, 0.7895361185073853, 0.5338279604911804, 0.20864850282669067, 0.9144184589385986, 0.8064946532249451, 0.494231641292572, 0.8152246475219727, 0.5249249339103699, 0.7980294823646545, 0.5219731330871582, 0.8025006055831909, 0.4904705882072449, 0.8125725984573364, 0.48787981271743774, 0.8144630789756775, 0.5175417065620422, 0.8051778078079224, 0.5016656517982483, 0.8153951168060303, 0.47676408290863037, 0.17097119987010956, 0.9315175414085388, 0.8540765047073364, 0.6266636848449707, 0.7042249441146851, 0.6643496751785278, 0.3308687210083008, 0.8660773634910583, 0.3480274975299835, 0.8616894483566284, 0.3568207323551178, 0.8604270815849304, 0.6429173946380615, 0.7116952538490295, 0.3702296018600464, 0.8559997081756592, 0.36393237113952637, 0.8605296015739441, 0.6505918502807617, 0.3036905527114868, 0.8867617845535278, 0.290048211812973, 0.8919240236282349, 0.7318577170372009, 0.588309645652771, 0.7470623850822449], "advantages": [6.995183944702148, 6.387371063232422, 6.1750898361206055, 5.257962226867676, 5.12085485458374, 6.14564323425293, 4.199608325958252, 3.053567409515381, 2.3646302223205566, 1.8006842136383057, 1.200494408607483, 0.36818861961364746, -0.19588571786880493, -1.0585012435913086, -1.594315528869629, -2.2148935794830322, -3.1124563217163086, -3.7041049003601074, -4.381546974182129, -5.305499076843262, -6.269021987915039, -7.244521617889404, -8.026840209960938, -8.60002326965332, -9.063530921936035, -10.0787353515625, -10.535754203796387, -11.564391136169434, -12.162471771240234, -12.858068466186523, -13.282214164733887, -14.26744270324707, -14.668869018554688, -14.946483612060547, -16.243879318237305, -16.416399002075195, -17.739198684692383, -17.798625946044922, -17.940139770507812, -18.42546272277832, -19.29154396057129, -20.429227828979492, -21.683347702026367, -22.71961784362793, -23.920515060424805, -25.268037796020508, -26.362838745117188, -27.77870750427246, -28.72662353515625, -30.143386840820312, -31.615825653076172, -32.11310958862305, -32.814266204833984, -34.17627716064453, -34.845916748046875, -36.362552642822266, -38.207576751708984, -40.74843978881836, -44.385318756103516, -48.55998229980469, -47.21519088745117, -51.216514587402344, -50.100730895996094, -49.44249725341797, -53.1641731262207, -56.30058670043945, -58.3718376159668, -59.67190170288086, -60.38432312011719, -61.772830963134766, -62.7506103515625, -63.938621520996094, -64.62114715576172, -64.35713195800781, -66.89244842529297, 15.066789627075195, 15.24238395690918, 14.751054763793945, 15.03076457977295, 14.266862869262695, 14.544379234313965, 13.76075267791748, 13.799358367919922, 14.878401756286621, 13.524380683898926, 14.668540954589844, 13.277851104736328, 12.860018730163574, 12.87961483001709, 14.106449127197266, 12.672066688537598, 12.262079238891602, 12.309367179870605, 11.899307250976562, 11.95831298828125, 13.383491516113281, 11.849344253540039, 11.427570343017578, 11.58701229095459, 11.152270317077637, 11.654766082763672, 10.685210227966309, 11.14793872833252, 10.22532844543457, 10.474597930908203, 
9.987019538879395, 10.419861793518066, 9.556634902954102, 9.893877983093262, 9.364506721496582, 9.789912223815918, 9.23778247833252, 9.656144142150879, 8.917020797729492, 9.295891761779785, 10.578024864196777, 8.669842720031738, 8.067720413208008, 8.319108963012695, 7.764983654022217, 8.458747863769531, 7.758238792419434, 8.604207038879395, 7.878355979919434, 8.139037132263184, 7.812491416931152, 8.045218467712402, 7.825507640838623, 9.143359184265137, 8.328527450561523, 8.552919387817383, 9.509198188781738, 8.428767204284668, 8.644680976867676, 8.75616455078125, 9.148237228393555, 9.199009895324707, 9.889113426208496, 10.870268821716309, 9.511799812316895, 10.345779418945312, 9.130401611328125, 8.706911087036133, 9.063611030578613, 8.719443321228027, 8.979187965393066, 8.71177864074707, 9.556770324707031, 9.068345069885254, 9.08609676361084, 9.329458236694336, 8.664011001586914, 8.830598831176758, 8.207993507385254, 8.2406644821167, 8.007857322692871, 8.130121231079102, 9.222833633422852, 8.401140213012695, 7.885528564453125, 8.33476734161377, 7.7013654708862305, 7.285739898681641, 7.217772960662842, 6.732208251953125, 6.71940803527832, 7.326486587524414, 6.518040180206299, 7.225553512573242, 6.329440116882324, 5.516270637512207, 5.814112186431885, 4.95138692855835, 5.2736663818359375, 4.37452507019043, 3.90032958984375, 3.535935401916504, 3.72714900970459, 4.267701148986816, 3.352041006088257, 3.9003024101257324, 4.855912685394287, 3.9139962196350098, 4.891931533813477, 3.907681703567505, 4.9063568115234375, 3.884683609008789, 2.57891845703125, 3.3908727169036865, 4.380886554718018, 3.3223631381988525, 4.32577657699585, 3.2390105724334717, 1.7951972484588623, 0.121520034968853, 0.8420259952545166, -0.7820897102355957, -0.1367400735616684, 0.6520874500274658, -0.7575302124023438]}
+{"type": "SampleBatch", "eps_id": [627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 627353008, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972], "obs": 
"BCJNGGhAFg0AAAAAAABQFg0AgIAFlQsNAAAAAAAAjCByYXkuY2xvdWRwaWNrbGUuY2xvdWRwaWNrbGVfZmFzdJSMEV9udW1weV9mcm9tYnVmZmVylJOUKJaADAAAAAAAAKma6r7JJke+VEm5PBa8or19mOy+8aPHvoZErDz6jmE+ppbwvkPKR75mW9A85Y+GvRyW8r64+se+kZfFPPeHcD4Blva+5wYWv7kT7Dxp+wg/SJb8vmtdyL5v3yE9fNSAPhNMAL9khkm+T3w2Pbkn6LwHTgG/RMAZu/4pND3U/5y+GlEBv+PRSr5NCxs9L8rZubZUAr9elMm+lgIbPXGhmz7BWAS/qt8WvzjpMz0qvBs/Ol0Hv0gsyr4Iv2U9s9yoPspiCb8aZ02+0GGAPemfYj21aQq/tAjLvvilgj0Z9rs+eXEMv3o/T75rr5E9pPvCPcB6Db9zAsy+u5WVPWOk0T4EhQ+/J1lRvjRbpj29ChA++5AQv3UzLbwxHqw9X1UAvtaeEL+Apjs+DvymPbfcx76lrg+/eMlTvOH+lj0gwJW9lr8PvzBpOT4oAJQ9Me6uvkPSDr9y3XW8lQGGPdny3bzu5Q6/GRFYvn3lhD13LJI+f/oPvwwlirwgl5A9vshkPJkQEL9bbjU+jCmRPZPigr5eKA+/2qmavAWxhj1Rw289HUEPv0ByMz7QFok9UcxZvmxbDr/cKKq8kGCAPWN6zT2mdg6/Su9dvpx8hD2IANM+uZIPvzUEubzuXZU93eAPPlSwD79161++Px+bPSQa6T7yzhC/0F3Uvi3FrT3aVUU/mu4Sv944Yr4JWM09P4oBPysQFL8v/9+8/hHiPWF0fD4CNBS/NgcqPiEr7D0X1Bm8X1oTvz37tz6tyOs9xPOGvmGDEb80ryY+2/zgPUUygT0GrhC/ilu2PleS4z2NpkW+MNsOv5CuDD9mqts9ALrlvuUKDL9fxrQ+mUnJPSNi/r0cPAq/E/ILPygzxD0vyMS+l28Hvy1dsz4QdbQ9uiKBvWukBb/+vx0+49+xPXscgz5/2gS/UheyPgxdvD0BBwO8lhIDv2GhCj8wCbw9K2WKvsxMAL/MwLA+2/awPaFCTD2eEP2+MvwJP8MBsz2Xflu+p4v3viaXOz8jOqo90tr0vroK8L6TXQk/g6OWPehnJL4ajOq+wVSuPv8PkD2CGh4+hw/nvoHXCD/6YpY9Uh3svUSW4b5NgTo/EqqRPc+SxL50INq+KS5sPz/wgT01Qye/+K3Qvq8EOj9YWk49OMyuviQ9yb6o5Ac/oWIyPXtDCL2YzcO+hq05P/aoLz0QsZ++QWC8vr+TBz/9GxY9HcDEu/Lztr6XYzk/Ep4VPfHkkr6Pia++OU8HP486/DzW3Is8/h+qvu4kOT+nBv884g6Ivh24or5w+mo/xXzTPLVmDL/wUZm+yWqOP+BCczyqN1W/PO2NvvjUaj+3Qe260RoJv46IhL4h4zg/syZNvNlLeb6fRnq+N+JqP4Z2jrxWQQq/NHxnvgD/OD9D8ua88XyBvnyvWL4jDGs/9jAIvabpDb+24UW+Ozg5P3OaNb3JbYu+ahA3vvVvBz906Uu9AmDGO6g6LL7bjzk/f2pLvR6Umr5ZYh2+9MwHPwkmZL2QN8+8J4USvsvxOT+COGa9loervgKlA76JNQg/LtWAvT0feL3kfvG9SmA6P2BQg715rr6+7azTvXmLbD+IkZK9+0Ivv62997xvvoC8E1lEvRzc7DzYUPq8XClXvrb6Qb17uJw+7l4OvUwxa7xv5yi9HFmVuvqLD73w31W+Vf8ovS93jj4iqCC9auJXvPEzEr01bt68d7whvWDBVL5dbRS9+hOCPrLBMr27C0e8vDr/vNUaTL15wDO9H8ZTvmKyA71rc24+mrFEvZ45OLzSPeG8ZeyOvWmdRb2L51K+5KzsvPk7Wz68fFa9ISPNvg+Zybziu/4+KE93vY4fUr6CKnC8iuxJPj0PhL23tSG85owvvGv2zL28doS9g9o9PipYULyx0Mq+RL15vTg6HLy0Eqm8wBzcvT2Fer2WQlG+mq66vDXpNj5xoYW9bRASvJJqnbyqIvi97f6FvYulUL5iRLG8al4pPndXjr1VEMy+CSuWvMkA5z6xqp697g9Qvoh+GLyrcBw+QP2mvVn0Abzp3cy7UUISvmxQp70axj8+fzwVvFz9376opJ+94Lr8u3xLkrz8ORe+iPWfvaxvT765faq8R6gOPq1BqL0kpOq7eqqTvMOyI77DjKi9VN5Ovo/brbxsIQI+GNOwvVNJ2LtpCZm83Fowvk8Ysb32SE6+6UC1vC+E6j2qWLm9VTXFuwR+orwjgz2+xpe5vd6rTb5v0MC873DPPdnRwb0J8LC7CjiwvJ9+S753CsK9MQNNvi7H0LznXrI9yz3KvYw3yr4mgsK8O0a+PjNr2r2p+Ba/4J6FvJwfKD/8kvK9Cu3JvjIsULthybc+N10BvjsATL4cKYM7Iq+FPbNxBb6i7sm+g/CtO3/rtz58hQ2+GSNMvrGtTDzcs4s9qpoRvpSSkLvpB2M8FsthvsyxEb7UhUy+7MYaPKm2nD30yBW+jHObu+jZMzyDSFq+0+EVvlnSTL5JANw77+WpPYP6Gb49tqO7LC8JPNiUVL61FBq+3gpNvgJRijsQo7M9hi4evlt0yr7ezMM7d3HDPqhHJr4JMU2+2vtePJ46uj09Yiq+79myu8rHfDwMKkq+234qvs+dTb5/Fjw8rvjMPZybLr4bbL+7IOJcPIJ9Qb49ui6+TftNvmf3HjzOFN093dgyvrn2yr7kVkI84q7OPjf3Or4ETE6+7U6jPFcG6z10Fz++/EnZuzwctjzdry++ODo/vuTpTr4ZAJo8qx0DPp5dQ76wd+y7mvquPER1Ir50g0O+OIBPvlL8lDwAEhA+26lHvo31/rtxCaw8kbQVvqbSR77iElC+gxWUPLW1HD79+0u+SZ4IvFgorTwqGgm+tSdMvp2lUL6jOJc8M1spPvpTUL564RG8eFGyPNeo+L2pglC+dlw+PuxsnjxPdtC+A7RMvp/wwj5obzc8xpQxv9PnRL706j0+iHkvu86Dy75yG0G+d+HCPkIeLrxq5jC/3k85vvYbPj5vRsi8SKvNvoKCNb7aTxW8bAsFvQ5A771KsjW+MKVQvlidDr33aCk+jd45viOSBbzUDwG9ZFUNvkwJOr5ArU++V14MvcMGFD6aMD6+nGHsu8KGAL0IjCK+bFY+vry3Tr64hw29uLb9PdF4Qr74j827sWEDvRbON761mUK+d75NvgUWEr2vu9I9HrdGvvKAyr4cqAm9E6vEPsHQTr4Yu0y+KGHUvHDzpT366FK+oBLKvnsax7yxGLs+M/5avun+S76LO4u8LXuFPakSX74Cxsm+2Y2AvCt0tD7SJGe+qscWvyaeDbxe4CM/yzRzvqaYyb7FSYg7EoOwPiNFe76zwxa/nBw1PJGEIz9mqoO+zbTJvhE1wzyN+bI+IrOHvpXjFr+vevw8LVAmPzy8jb4mGsq+uHUzPXvSuz4Ax5G+iScXv+
qCUT1hSiw/0tKXvvHJyr56UoQ9RTHLPhnhm778xE6+3JOUPZE7AD5u8p2+yZ8BvPa0mT3kKRO+KweevpjrUL4C0pM9TcovPgEeoL5K7CO8GdqaPbN4x707OKC+Dl88Ps7clj2rCLu+AFaevsrARrxa5oc9nSZOvc11nr7uWDo+m9aFPVuJpL7BmJy+mIVlvMtZcT1mRBC8er2cvsWLOD4ioXA9o42Qvgrlmr7ouL8+O4BZPeBQDr9sD5e+Ses2Prb1Kz2s63y+JjuVvuIKvz7luRc9V7EGvwNpkb5axDU+vD/ZPMhZY76wl4++DFOTvHPftDzamaE91caPvucONT4JzcE80LBTvlL3jb6WrZi8MO6fPGEfvz0uKI6+pytbvmA4rzxBwcg+QVmQvuR2nbw4du88YZLZPaWLkL5ArjM+C28APTxVNb6pv46+iYG9PrHa4zxRX+u+ZPWKvurQMj79iJg8wDEivp8rib6fDaq8BSt9PNN3Dz4KYom+3UhdvvaJlTwgEOA+h5iLvjn+rbw0Pd08LF4aPjTQi75r8F2+HPD1PCJW5z5eCI6+vKe0vJj7Hz0Fzyw+LkKOvvygMD64zi09MQzkvQJ+jL4ZLL68gq8kPcMTRz7duoy+52svPpycND1Qva69yfmKvp1Guz5Hny09g0C6vu86h762bQ8/ZdIPPTeOJL85foG+nba6PgRUtjwftK2+gYR7vgpOLT5vfH08MGUivS8NeL6GcLo+lH5wPCSip74LmHC+B90sPoc1BTxG2va8+yJtviJGuj6Eq/Y7gPajvomvZb5Uoiw+nzCTOtJgzrymO2K+gX/ZvD1MIjrmIok+lIwFbnVtcHmUjAVkdHlwZZSTlIwCZjSUSwBLAYeUUpQoSwOMATyUTk5OSv////9K/////0sAdJRiS8hLBIaUjAFDlHSUUpQuAAAAAA==", "actions": [0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1], "rewards": [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0], "prev_actions": [1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0], "prev_rewards": [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0], "dones": [false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, true, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false], "new_obs": 
"BCJNGGhAFg0AAAAAAABQFg0AgIAFlQsNAAAAAAAAjCByYXkuY2xvdWRwaWNrbGUuY2xvdWRwaWNrbGVfZmFzdJSMEV9udW1weV9mcm9tYnVmZmVylJOUKJaADAAAAAAAAH2Y7L7xo8e+hkSsPPqOYT6mlvC+Q8pHvmZb0Dzlj4a9HJbyvrj6x76Rl8U894dwPgGW9r7nBha/uRPsPGn7CD9Ilvy+a13Ivm/fIT181IA+E0wAv2SGSb5PfDY9uSfovAdOAb9EwBm7/ik0PdT/nL4aUQG/49FKvk0LGz0vytm5tlQCv16Uyb6WAhs9caGbPsFYBL+q3xa/OOkzPSq8Gz86XQe/SCzKvgi/ZT2z3Kg+ymIJvxpnTb7QYYA96Z9iPbVpCr+0CMu++KWCPRn2uz55cQy/ej9PvmuvkT2k+8I9wHoNv3MCzL67lZU9Y6TRPgSFD78nWVG+NFumPb0KED77kBC/dTMtvDEerD1fVQC+1p4Qv4CmOz4O/KY9t9zHvqWuD794yVO84f6WPSDAlb2Wvw+/MGk5PigAlD0x7q6+Q9IOv3LddbyVAYY92fLdvO7lDr8ZEVi+feWEPXcskj5/+g+/DCWKvCCXkD2+yGQ8mRAQv1tuNT6MKZE9k+KCvl4oD7/aqZq8BbGGPVHDbz0dQQ+/QHIzPtAWiT1RzFm+bFsOv9woqryQYIA9Y3rNPaZ2Dr9K712+nHyEPYgA0z65kg+/NQS5vO5dlT3d4A8+VLAPv3XrX74/H5s9JBrpPvLOEL/QXdS+LcWtPdpVRT+a7hK/3jhivglYzT0/igE/KxAUvy//37z+EeI9YXR8PgI0FL82Byo+ISvsPRfUGbxfWhO/Pfu3Pq3I6z3E84a+YYMRvzSvJj7b/OA9RTKBPQauEL+KW7Y+V5LjPY2mRb4w2w6/kK4MP2aq2z0AuuW+5QoMv1/GtD6ZSck9I2L+vRw8Cr8T8gs/KDPEPS/IxL6Xbwe/LV2zPhB1tD26IoG9a6QFv/6/HT7j37E9exyDPn/aBL9SF7I+DF28PQEHA7yWEgO/YaEKPzAJvD0rZYq+zEwAv8zAsD7b9rA9oUJMPZ4Q/b4y/Ak/wwGzPZd+W76ni/e+Jpc7PyM6qj3S2vS+ugrwvpNdCT+Do5Y96GckvhqM6r7BVK4+/w+QPYIaHj6HD+e+gdcIP/pilj1SHey9RJbhvk2BOj8SqpE9z5LEvnQg2r4pLmw/P/CBPTVDJ7/4rdC+rwQ6P1haTj04zK6+JD3JvqjkBz+hYjI9e0MIvZjNw76GrTk/9qgvPRCxn75BYLy+v5MHP/0bFj0dwMS78vO2vpdjOT8SnhU98eSSvo+Jr745Twc/jzr8PNbcizz+H6q+7iQ5P6cG/zziDoi+HbiivnD6aj/FfNM8tWYMv/BRmb7Jao4/4EJzPKo3Vb887Y2++NRqP7dB7brRGgm/joiEviHjOD+zJk282Ut5vp9Ger434mo/hnaOvFZBCr80fGe+AP84P0Py5rzxfIG+fK9YviMMaz/2MAi9pukNv7bhRb47ODk/c5o1vclti75qEDe+9W8HP3TpS70CYMY7qDosvtuPOT9/aku9HpSavlliHb70zAc/CSZkvZA3z7wnhRK+y/E5P4I4Zr2Wh6u+AqUDvok1CD8u1YC9PR94veR+8b1KYDo/YFCDvXmuvr7trNO9eYtsP4iRkr37Qi+/DdStvZzdOj9AnK69hqHUvthQ+rxcKVe+tvpBvXu4nD7uXg69TDFrvG/nKL0cWZW6+osPvfDfVb5V/yi9L3eOPiKoIL1q4le88TMSvTVu3rx3vCG9YMFUvl1tFL36E4I+ssEyvbsLR7y8Ov+81RpMvXnAM70fxlO+YrIDvWtzbj6asUS9njk4vNI94bxl7I69aZ1FvYvnUr7krOy8+TtbPrx8Vr0hI82+D5nJvOK7/j4oT3e9jh9SvoIqcLyK7Ek+PQ+Evbe1IbzmjC+8a/bMvbx2hL2D2j0+KlhQvLHQyr5EvXm9ODocvLQSqbzAHNy9PYV6vZZCUb6arrq8Nek2PnGhhb1tEBK8kmqdvKoi+L3t/oW9i6VQvmJEsbxqXik+d1eOvVUQzL4JK5a8yQDnPrGqnr3uD1C+iH4YvKtwHD5A/aa9WfQBvOndzLtRQhK+bFCnvRrGPz5/PBW8XP3fvqikn73guvy7fEuSvPw5F76I9Z+9rG9Pvrl9qrxHqA4+rUGovSSk6rt6qpO8w7IjvsOMqL1U3k6+j9utvGwhAj4Y07C9U0nYu2kJmbzcWjC+TxixvfZITr7pQLW8L4TqPapYub1VNcW7BH6ivCODPb7Gl7m93qtNvm/QwLzvcM892dHBvQnwsLsKOLC8n35LvncKwr0xA02+LsfQvOdesj3LPcq9jDfKviaCwrw7Rr4+M2vavan4Fr/gnoW8nB8oP/yS8r0K7cm+MixQu2HJtz43XQG+OwBMvhwpgzsir4U9s3EFvqLuyb6D8K07f+u3PnyFDb4ZI0y+sa1MPNyziz2qmhG+lJKQu+kHYzwWy2G+zLERvtSFTL7sxho8qbacPfTIFb6Mc5u76NkzPINIWr7T4RW+WdJMvkkA3Dvv5ak9g/oZvj22o7ssLwk82JRUvrUUGr7eCk2+AlGKOxCjsz2GLh6+W3TKvt7Mwzt3ccM+qEcmvgkxTb7a+148njq6PT1iKr7v2bK7ysd8PAwqSr7bfiq+z51Nvn8WPDyu+Mw9nJsuvhtsv7sg4lw8gn1Bvj26Lr5N+02+Z/cePM4U3T3d2DK+ufbKvuRWQjzirs4+N/c6vgRMTr7tTqM8VwbrPXQXP778Sdm7PBy2PN2vL744Oj++5OlOvhkAmjyrHQM+nl1DvrB37Lua+q48RHUivnSDQ744gE++UvyUPAASED7bqUe+jfX+u3EJrDyRtBW+ptJHvuISUL6DFZQ8tbUcPv37S75Jngi8WCitPCoaCb61J0y+naVQvqM4lzwzWyk++lNQvnrhEbx4UbI816j4vamCUL52XD4+7GyePE920L4DtEy+n/DCPmhvNzzGlDG/0+dEvvTqPT6IeS+7zoPLvnIbQb534cI+Qh4uvGrmML/eTzm+9hs+Pm9GyLxIq82+goI1vtpPFbxsCwW9DkDvvUqyNb4wpVC+WJ0OvfdoKT6N3jm+I5IFvNQPAb1kVQ2+TAk6vkCtT75XXgy9wwYUPpowPr6cYey7woYAvQiMIr5sVj6+vLdOvriHDb24tv090XhCvviPzbuxYQO9Fs43vrWZQr53vk2+BRYSva+70j0et0a+8oDKvhyoCb0Tq8Q+wdBOvhi7TL4oYdS8cPOlPfroUr6gEsq+exrHvLEYuz4z/lq+6f5Lvos7i7wte4U9qRJfvgLGyb7ZjYC8K3S0PtIkZ76qxxa/Jp4NvF7gIz/LNHO+ppjJvsVJiDsSg7A+I0V7vrPDFr+cHDU8kYQjP2aqg77NtMm+ETXDPI35sj4is4e+leMWv696/DwtUCY/PLyNviYayr64dTM9e9K7PgDHkb6JJxe/6oJRPWFKLD/S0pe+8cnKvn
pShD1FMcs+GeGbvvzETr7ck5Q9kTsAPm7ynb7JnwG89rSZPeQpE74rB56+mOtQvgLSkz1Nyi8+AR6gvkrsI7wZ2po9s3jHvTs4oL4OXzw+ztyWPasIu74AVp6+ysBGvFrmhz2dJk69zXWevu5YOj6b1oU9W4mkvsGYnL6YhWW8y1lxPWZEELx6vZy+xYs4PiKhcD2jjZC+CuWavui4vz47gFk94FAOv2wPl75J6zY+tvUrPazrfL4mO5W+4gq/PuW5Fz1XsQa/A2mRvlrENT68P9k8yFljvrCXj74MU5O8c9+0PNqZoT3Vxo++5w41PgnNwTzQsFO+UveNvpatmLww7p88YR+/PS4ojr6nK1u+YDivPEHByD5BWZC+5HadvDh27zxhktk9pYuQvkCuMz4LbwA9PFU1vqm/jr6Jgb0+sdrjPFFf675k9Yq+6tAyPv2ImDzAMSK+nyuJvp8NqrwFK30803cPPgpiib7dSF2+9omVPCAQ4D6HmIu+Of6tvDQ93TwsXho+NNCLvmvwXb4c8PU8IlbnPl4Ijr68p7S8mPsfPQXPLD4uQo6+/KAwPrjOLT0xDOS9An6MvhksvryCryQ9wxNHPt26jL7nay8+nJw0PVC9rr3J+Yq+nUa7PkefLT2DQLq+7zqHvrZtDz9l0g89N44kvzl+gb6dtro+BFS2PB+0rb6BhHu+Ck4tPm98fTwwZSK9Lw14voZwuj6UfnA8JKKnvguYcL4H3Sw+hzUFPEba9rz7Im2+Ika6PoSr9juA9qO+ia9lvlSiLD6fMJM60mDOvKY7Yr6Bf9m8PUwiOuYiiT7ZxmK+y5ssPlzSwzuS4Mm8lIwFbnVtcHmUjAVkdHlwZZSTlIwCZjSUSwBLAYeUUpQoSwOMATyUTk5OSv////9K/////0sAdJRiS8hLBIaUjAFDlHSUUpQuAAAAAA==", "action_prob": [0.563431441783905, 0.7628344297409058, 0.5356295704841614, 0.2208554744720459, 0.9120644927024841, 0.805221676826477, 0.5547137260437012, 0.7263613939285278, 0.41675612330436707, 0.16308242082595825, 0.9276096820831299, 0.8583623766899109, 0.3149293065071106, 0.8755647540092468, 0.26730912923812866, 0.8908889889717102, 0.7769559025764465, 0.5622466802597046, 0.6718210577964783, 0.6092480421066284, 0.632044792175293, 0.35083258152008057, 0.8472351431846619, 0.6944887042045593, 0.5346443057060242, 0.7282666563987732, 0.4901244044303894, 0.2427688091993332, 0.8860976099967957, 0.21124772727489471, 0.10345065593719482, 0.9379718899726868, 0.9069287776947021, 0.8476080894470215, 0.7441242337226868, 0.4099231958389282, 0.7811298966407776, 0.6485039591789246, 0.5273187160491943, 0.6908736824989319, 0.4775017201900482, 0.27513980865478516, 0.8451710343360901, 0.7583397030830383, 0.3778783679008484, 0.783466637134552, 0.6621564030647278, 0.5131763815879822, 0.31020182371139526, 0.8190524578094482, 0.718977689743042, 0.5592713356018066, 0.6369394063949585, 0.42603304982185364, 0.7499458193778992, 0.4087483584880829, 0.7619744539260864, 0.3944096863269806, 0.7722060084342957, 0.6174934506416321, 0.39916980266571045, 0.7937448620796204, 0.6316325068473816, 0.5874954462051392, 0.665071964263916, 0.5555748343467712, 0.7094094753265381, 0.4930602014064789, 0.7237469553947449, 0.5428276062011719, 0.6925302743911743, 0.6023407578468323, 0.6485161781311035, 0.3296322226524353, 0.8773176074028015, 0.5151809453964233, 0.7969620227813721, 0.5552796721458435, 0.7759831547737122, 0.5895014405250549, 0.7554612159729004, 0.6187127232551575, 0.735506534576416, 0.6438470482826233, 0.2839134633541107, 0.8970435857772827, 0.7174158692359924, 0.34208860993385315, 0.8786332011222839, 0.6870452165603638, 0.6770513653755188, 0.7033825516700745, 0.34064072370529175, 0.8805349469184875, 0.6641690731048584, 0.29057860374450684, 0.8934840559959412, 0.7339879870414734, 0.6211369633674622, 0.7483421564102173, 0.600459098815918, 0.762718677520752, 0.5777633190155029, 0.7773151993751526, 0.5524963140487671, 0.7922576665878296, 0.4758961796760559, 0.17312873899936676, 0.9298287630081177, 0.8380169868469238, 0.44677990674972534, 0.8466207385063171, 0.57846999168396, 0.7640720009803772, 0.5881628394126892, 0.7588244080543518, 0.595130443572998, 0.7550949454307556, 0.4003913998603821, 0.8638266921043396, 0.6254569888114929, 0.7293568849563599, 0.6374799013137817, 0.7203916311264038, 0.35219213366508484, 0.880338728427887, 0.6779037117958069, 0.6782917976379395, 0.6959462761878967, 
0.6594412922859192, 0.7129769325256348, 0.6396936774253845, 0.7293673753738403, 0.6186374425888062, 0.7454165816307068, 0.4041803181171417, 0.15459243953227997, 0.930853545665741, 0.1416236311197281, 0.9363786578178406, 0.8771600723266602, 0.692272961139679, 0.6618179678916931, 0.7188828587532043, 0.6307380795478821, 0.743956446647644, 0.5969002842903137, 0.7679077982902527, 0.44033947587013245, 0.8384230732917786, 0.45649006962776184, 0.8333059549331665, 0.4636642634868622, 0.16825638711452484, 0.9304778575897217, 0.1564607173204422, 0.9345928430557251, 0.13891303539276123, 0.9397754669189453, 0.11874296516180038, 0.9452661871910095, 0.9010019302368164, 0.7714344263076782, 0.5068330764770508, 0.8109648823738098, 0.5675387382507324, 0.7181237936019897, 0.6213261485099792, 0.6761613488197327, 0.6662272214889526, 0.3659699261188507, 0.8406238555908203, 0.38610297441482544, 0.8361998200416565, 0.6061047911643982, 0.721760094165802, 0.5886551141738892, 0.26576125621795654, 0.8936172723770142, 0.7577317357063293, 0.471327543258667, 0.7948273420333862, 0.5243538618087769, 0.22516724467277527, 0.9051489233970642, 0.20626990497112274, 0.910885214805603, 0.8154552578926086, 0.4112367630004883, 0.8317334055900574, 0.6268150210380554, 0.33067744970321655, 0.859747052192688, 0.6731149554252625, 0.6428382396697998, 0.6720165610313416, 0.6439140439033508, 0.676583468914032, 0.36063018441200256, 0.8502982258796692], "advantages": [7.427423477172852, 6.072429656982422, 6.868880271911621, 5.495976448059082, 3.963945150375366, 4.469392776489258, 5.140400409698486, 5.844754695892334, 4.791353702545166, 3.366373300552368, 1.912610650062561, 2.1395699977874756, 2.5822439193725586, 1.2248811721801758, 1.5413694381713867, 0.23772397637367249, 0.3900194466114044, 0.6641584038734436, 1.2276068925857544, 0.1507691890001297, 0.62888503074646, -0.5168718099594116, -1.776077389717102, -1.7793984413146973, -1.5204557180404663, -2.7137339115142822, -2.5616979598999023, -3.7925326824188232, -5.086300373077393, -5.47756814956665, -6.67978572845459, -7.883882999420166, -8.724483489990234, -9.696170806884766, -10.332322120666504, -10.100647926330566, -11.655351638793945, -11.632172584533691, -10.544052124023438, -12.892107963562012, -12.00915241241455, -14.413633346557617, -16.12154197692871, -16.671293258666992, -16.10246467590332, -18.612600326538086, -18.214603424072266, -16.78709602355957, -20.068668365478516, -22.69741439819336, -22.63427734375, -21.470199584960938, -19.392126083374023, -23.42035675048828, -27.035579681396484, -26.09910011291504, -29.85240936279297, -28.99910545349121, -32.902523040771484, -32.13100051879883, -30.16973304748535, -27.392894744873047, -32.43961715698242, -37.49467849731445, -35.2943115234375, -40.5666389465332, -38.11614227294922, -43.57398986816406, -48.61722183227539, -47.002037048339844, -52.18333053588867, -50.229373931884766, -55.529361724853516, -53.21004104614258, -49.37760543823242, 21.45175552368164, 21.739347457885742, 21.289531707763672, 21.549474716186523, 21.172199249267578, 21.405364990234375, 21.09391975402832, 21.301456451416016, 21.051029205322266, 21.234195709228516, 22.120201110839844, 20.87684440612793, 20.704317092895508, 21.768112182617188, 20.989566802978516, 21.105453491210938, 21.041337966918945, 21.1368465423584, 21.872400283813477, 20.85971450805664, 20.84906578063965, 22.099565505981445, 21.24409294128418, 21.279481887817383, 21.388246536254883, 21.403446197509766, 21.566425323486328, 21.559871673583984, 21.78159523010254, 21.750450134277344, 22.0374813079834, 
21.977205276489258, 22.470796585083008, 23.198339462280273, 21.98628044128418, 21.350448608398438, 21.765207290649414, 21.112199783325195, 21.33968162536621, 21.144468307495117, 21.377395629882812, 21.162199020385742, 21.403732299804688, 21.167354583740234, 21.484699249267578, 20.81749153137207, 21.015047073364258, 20.742582321166992, 20.935253143310547, 20.64582061767578, 20.91439437866211, 20.187877655029297, 20.30426597595215, 19.995586395263672, 20.08624839782715, 19.76849937438965, 19.830961227416992, 19.505708694458008, 19.536800384521484, 19.20586395263672, 19.201637268066406, 20.382747650146484, 23.039796829223633, 20.92194366455078, 23.806928634643555, 21.65060806274414, 20.373004913330078, 19.81646156311035, 20.247398376464844, 19.650461196899414, 20.168058395385742, 19.524856567382812, 20.13798713684082, 19.44279670715332, 19.147628784179688, 19.00004005432129, 18.697729110717773, 18.563947677612305, 18.253379821777344, 18.23143196105957, 17.51019859313965, 17.56808090209961, 16.742382049560547, 16.899930953979492, 15.932376861572266, 16.217252731323242, 15.069169998168945, 14.239531517028809, 14.072455406188965, 13.71423625946045, 13.422921180725098, 14.173169136047363, 13.0308837890625, 13.713277816772461, 12.56125259399414, 13.171510696411133, 15.170886993408203, 12.983121871948242, 15.015313148498535, 12.779891967773438, 11.530719757080078, 12.091137886047363, 10.830716133117676, 10.454005241394043, 9.675451278686523, 10.090161323547363, 11.965018272399902, 9.662449836730957, 8.382135391235352, 8.043291091918945, 7.081740856170654, 6.817946434020996, 5.75173807144165, 5.885012626647949, 4.7028350830078125, 4.740917205810547, 6.125810623168945, 9.087624549865723, 5.688118934631348, 3.326551675796509, 4.777929782867432, 2.37506365776062, 3.840435743331909, 1.3977327346801758, 0.07658004760742188]}
+{"type": "SampleBatch", "eps_id": [700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 700489972, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651], "obs": 
"BCJNGGhAFg0AAAAAAABQFg0AgIAFlQsNAAAAAAAAjCByYXkuY2xvdWRwaWNrbGUuY2xvdWRwaWNrbGVfZmFzdJSMEV9udW1weV9mcm9tYnVmZmVylJOUKJaADAAAAAAAANnGYr7Lmyw+XNLDO5LgybwYU1++s0TavOqrszvZMoo+yd5fvgkrY75sSDI8s3cQP+NpZL6YK9y8x5m1PMvYjD7L9mS+4fMrPvSr4jyXUyy8ZYZhvoyruT7M8uA8P7SWviIZWr5NJSs+HrmwPMco4TverFa+RFC5PlLZsTyiz46+QkNPvheDKj49JoQ84h6oPDzaS77ECbk+BIOHPNC3iL5yc0S+q2gOPyeGNzxC8A2/6Q45vvDVuD5t3+s4LTqEvjKqMb5n3Sk+vJCluxIoDT19RC6+Ld+4Pvf6jrvqBYW+Zt8mvrMAKj79nxy8fv4APfx4I74s+bg+ME4SvAtEh77cEhy+YHgOPyPgaLz+Rg+/Eq0QvkAkuT6gItC80AKLvjhFCb7c2yo+Z578vB/rVTxs2gW+PNbjvMZ6+rwAepc+PWwGvvbBKz7QAcq88enOu9b8Ar4z9bk+qQrLvHIEnb5CGfe9MXssPoZJ/bz5fLO8DTPwvelcuj5AcAC9rvulvldK4b1oZC0+7P4ava80Kr3OWtq9hrHOvGBmHr1AsHQ+X2Pbve2CLj4n0wq9On+GvWBo1L1lPMa8ZzQQvYZQXT4eZtW9roUvPsP//LzoGLO9xWDOva2GvrzWqQW9+wRIPqRUz72iIF++4VLrvK6A9D53Qdi9UFu3vDgVnbyiKzQ+KizZvS0pMT5wQYC8v1X7vQkW0r1USbO8x1yUvKvwKD6G+9K9mqcxPvypcryZjwi+V+DLvTjWvD6HLo+88IHcvvbEvL1lIDI+gr7VvIEAE77ypLW95ya9PrVD7byRguO+HYOmvUDuMj6qCBu9yM0kvt1an71ror0+2TcovX4+7r4mL5C9CBU0PlNWTr0SUj6+HPuIvY2Xl7wXkF29RmC5PSa9ib36mjU+1yVWvf/6X76CeYK9rCqLvPYQaL3whWk9pCuDve9SWL5SZWO9jJ+pPszSi72RG3y8jkFIvZk7sTwmdIy93bo4Ptd7Rr36dpK+gxCFveVYZbwJ6129O0QUvEyjhb1jTVW+0ahevec4iD6DK469uiLOvifdSL1jhgo/K6mevYnVGL8wiRy9L2VRP0Edt71Jgs2+Dw+zvHl0Az8Ujse9o+dSvvPaPbxlJFs+vv3PvanqLrymde+77pGovbBt0L0OoVK+dLMSvF4NVT6I2ti97BsrvG8MnbswEbO9C0jZvbNtUr6dWda7FZ9QPtSy4b2cdCi8KaohuxViur2kHuK9DExSvpZ5jLu+t00+FYjqvePfJryoDI25Db6+veLy6r0IO1K+0bQLu8s/TD6lW/O9GVAmvB938zq6SsC9FcbzvQU6Ur7Pxaq3LylMPs4u/L1Wvia88v6BOyUbv72Fmfy9MFM9PhSvCTvs/MS+1Qb1vbOMwj58Tbe7RUAtv3V25b3YXz0+07ScvImNxb5D4929ploivFvs27wNRsu9K0vevX4KPj5mL+y8LfLMviax1r3RVBW8SuIWvWhE7725ENe95f0+PmJ0IL2ZgNe+92zPvVORA7xc70K9xDMQvivBz71ITE++n3hOvfe/Cz7nC9i9MVrZu4tKQ72/yy++dFHYvcfdTb7YWlG9O0HYPW7CSz3abzg9xGg+vT7p/jy/ck89aJB2PjLcO73nPY2+YSxjPfnVPT12dVK9QqgDO1f4Zj3kjRe+VEtSvU1cjj6C2Fo9O0mvvj6EO73m2Q0/y8w+Pf4jFr7LHw69dEt9PurJMj1enUg9o7jzvLWjZb0PzTY9pDMVviXo/LwViWg+ad0qPawpTD1+s9e8rfSZvbnyLj0iXhS+gQTkvN4aVj4nFCM9guCtvsTCwbxPWPw+JkIHPaSdE75WBWK8UnJFPvXl9jygm62+g9YivCRV9j7/V788zUQTvkLspblqxD0+38enPL8pUj1lKV47qBXcvfAvsDxOUBO+eZ2iOhvCPj74nZg8gJGtvjK9ojvabvU+gCZCPGxnE75Ycm48DMRAPir7Ejygta2+3BCWPKOR+D5rOg87NeITvp+b5TxNZks+WkA4uiwLrr5wEwM9jAEAP6rO9btNwhS+sQksPTXRXj6rgSq81p9JPf3cPT19Gny9Z2AavAAMFr440jg900d7Pj5kSryQPa++cexMPSVNDT/SRZ286r0Jv9kjej1TJ1w/cm31vHwIsL5rS6A9VkwWPwrhFr3Sdxm+oVe4PZPfoz4RKCO97qczPcJzxT2Nwmk9OpAfvXktcz4vysc9BZhQvvMbDL1zttw+LnK/PVGd7b4el9G8lfAfP9ZvrD0P1Tm/LHVWvJZv2z4ltI49qN/QvpgJlLsyIW4+1Px7PQ3PwL1Tsgs5TX3aPndGdD1jzbu+GQQOPEZcbD4VOlY9txJlvb+mWTz0rdk+O6VRPUrPqb6ue7I8CtlqPtN5Nj0qwb68Cw/YPLisCT1+kTQ9T/SQPtWQ3TxejWk+0sJLPR6DmDuRdwE9jzcEPW4kTD06DqA+hBwEPd4YaD5OwGU9yakTPd6tFj1lhNc+V7RoPY8kdL5zKTk9oX0dP0wsVT1tPQO/F49rPWm51j4fLSs9J+9Qvhn1hj14Jx0/JnYaPYVs974iGqA94iTWPlS/5TzqNze+zjuxPYQVZD60bsg809L6PWJbuj29wtU+kn/cPCBKJr40dcs9L1VjPlnkwTyr/A0+F43UPSdk1T4jnNg8mfoVvlif5T0ijBw/AJ3APEWD3L6Gq/49UWlOPzAZdDwiQze/EdkPPo4ngD8YXRg6U32Av0daJD4WW04/i7SfvGsBNr9x3DQ+nnkcPyoYCr3rVNm+DWFBPuJM1T4T3iy9KQgSvj/pST6fcGM+zow4vcG9Cz69dU4+APDVPuheLb2FKi6+dgRXPn0RHT/UTTu92Z3zvjiVYz5dlNY+XUhiveCiSr6EKmw+wzRmPlp+cr3obZ09K8VwPr+N+jxHMmy9z1qzPoZlcT7L6Wc+6H9PvfPeIz3rCHY+o40DPeM4TL1P8KE+T7F2PoUnJr7hTzK9PsQXP5lecz7e+wg9Kb8BvQzTkj7wDXQ+mX9qPneC1Lxs6YC8kb54Pk1W2T5+Fte88gqivg24gD4fQ2s+hXgFvYDlA71SEoM+yc0PPdQbCL1IA4A+W26DPqk5bD7ZQOe8ZONYvRjLhT6MYhM9yO3vvNc6bD5rKYY+HhFtPs8hyrysk5G9T4iIPiaIFj05x9W8AthaPqboiD5Lz20+YcOyvGdZsr1xSYs++VMZPfkHwbzXZks+k6uLPkd5bj6jfKC8PaTPvREOjj762Bs9IxmxvPd9PT7PcY4+mhNvPovHkrxAPOq92NWQPikoHj2vhKW8GL8wPhE7kT5Yom8+Jj2JvMZqAb6HoJM+/VAgPRLynbyK1CQ+IQeUPj4pcD6gkoO8iQoNvvFtlj7PYSI9rCOavPxuGT7e1ZY+yqtwPg
mXgbx6Shi+/D2ZPjFoJD3f9Jm8akMOPjWnmT5SLXE+xDGDvER0I76fEJw+QJvcPtlYnbw1Jeq+IHqgPv+wcT4IRui8hNguvtvkoj4u89w+2x8CvUzJ8b4eUKc+n5FyPm3PKL0fSkK+GL2pPiyRLT16Wji9+qK3PS0sqj5RuRu+CQIxvW1SvD6Gnag+/LkyPV7gEr26PX096Q+pPmL+dD7Gzw29/MF3vhiDqz5j2TY92qEhvalBIj0e+Ks+vRJ2PhljHr1hz4e+EG6uPjdrOz3hHTS9yYd1PAPmrj7vRhi+muMyvZhClj4vYK0+bLWvvvLYGr1PcxI/jtypPlQXF75099e8AROJPsNZqD4ZQK++VRqsvEhODT962KQ+0n8Jv5dVI7z0hFY/fFifPvUGr745gN47UcsKP1nYmz4OWRa+GXSQPIjVgD51V5o+W8dEPTCuuTyoKRG9ZdWaPnbvFr6537M8RFOHPgBTmT6gKkI9kS3fPIYQr7xEz5k++6YXvjyt2zyhP48+CUuYPp4APz0UwgQ9Ga6Nu0fFmD6EEHc+Z2cEPR++kr7DPZs+fDY7PabZ2Txp1oM8lLWbPrRhGb6ofNw89FOiPuwsmj7FFDg9SDcIPYcNBz28opo+okEavsDqCj3VAqw+1xeZPsYmND1TcCY9LNhdPSOLmT4EOXQ+K+AqPfTWZr5Y/Js+h1UvPZJoGD2fE6Q9j2ycPpEScz63+B499GlNvtPanj7RJN0+14kOPT4q9r4VR6M+df9xPthNzjxTmjW+mLKlPmZVJz1kP7E8VUL8PbAdpj5qUHE+qG3FPBaCJr50h6g+8mDcPnzJqjxqKuW+lIwFbnVtcHmUjAVkdHlwZZSTlIwCZjSUSwBLAYeUUpQoSwOMATyUTk5OSv////9K/////0sAdJRiS8hLBIaUjAFDlHSUUpQuAAAAAA==", "actions": [0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0], "rewards": [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0], "prev_actions": [1, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1], "prev_rewards": [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0], "dones": [false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, true, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false], "new_obs": 
"BCJNGGhAFg0AAAAAAABQFg0AgIAFlQsNAAAAAAAAjCByYXkuY2xvdWRwaWNrbGUuY2xvdWRwaWNrbGVfZmFzdJSMEV9udW1weV9mcm9tYnVmZmVylJOUKJaADAAAAAAAABhTX76zRNq86quzO9kyij7J3l++CStjvmxIMjyzdxA/42lkvpgr3LzHmbU8y9iMPsv2ZL7h8ys+9KviPJdTLLxlhmG+jKu5Pszy4Dw/tJa+Ihlavk0lKz4eubA8xyjhO96sVr5EULk+UtmxPKLPjr5CQ0++F4MqPj0mhDziHqg8PNpLvsQJuT4Eg4c80LeIvnJzRL6raA4/J4Y3PELwDb/pDjm+8NW4Pm3f6zgtOoS+MqoxvmfdKT68kKW7EigNPX1ELr4t37g+9/qOu+oFhb5m3ya+swAqPv2fHLx+/gA9/Hgjviz5uD4wThK8C0SHvtwSHL5geA4/I+BovP5GD78SrRC+QCS5PqAi0LzQAou+OEUJvtzbKj5nnvy8H+tVPGzaBb481uO8xnr6vAB6lz49bAa+9sErPtAByrzx6c671vwCvjP1uT6pCsu8cgSdvkIZ970xeyw+hkn9vPl8s7wNM/C96Vy6PkBwAL2u+6W+V0rhvWhkLT7s/hq9rzQqvc5a2r2Gsc68YGYevUCwdD5fY9u97YIuPifTCr06f4a9YGjUvWU8xrxnNBC9hlBdPh5m1b2uhS8+w//8vOgYs73FYM69rYa+vNapBb37BEg+pFTPvaIgX77hUuu8roD0PndB2L1QW7e8OBWdvKIrND4qLNm9LSkxPnBBgLy/Vfu9CRbSvVRJs7zHXJS8q/AoPob70r2apzE+/KlyvJmPCL5X4Mu9ONa8Pocuj7zwgdy+9sS8vWUgMj6CvtW8gQATvvKktb3nJr0+tUPtvJGC474dg6a9QO4yPqoIG73IzSS+3VqfvWuivT7ZNyi9fj7uviYvkL0IFTQ+U1ZOvRJSPr4c+4i9jZeXvBeQXb1GYLk9Jr2JvfqaNT7XJVa9//pfvoJ5gr2sKou89hBovfCFaT2kK4O971JYvlJlY72Mn6k+zNKLvZEbfLyOQUi9mTuxPCZ0jL3dujg+13tGvfp2kr6DEIW95VhlvAnrXb07RBS8TKOFvWNNVb7RqF695ziIPoMrjr26Is6+J91IvWOGCj8rqZ69idUYvzCJHL0vZVE/QR23vUmCzb4PD7O8eXQDPxSOx72j51K+89o9vGUkWz6+/c+9qeouvKZ177vukai9sG3QvQ6hUr50sxK8Xg1VPoja2L3sGyu8bwyduzARs70LSNm9s21Svp1Z1rsVn1A+1LLhvZx0KLwpqiG7FWK6vaQe4r0MTFK+lnmMu763TT4ViOq9498mvKgMjbkNvr694vLqvQg7Ur7RtAu7yz9MPqVb870ZUCa8H3fzOrpKwL0VxvO9BTpSvs/FqrcvKUw+zi78vVa+Jrzy/oE7JRu/vYWZ/L0wUz0+FK8JO+z8xL7VBvW9s4zCPnxNt7tFQC2/dXblvdhfPT7TtJy8iY3FvkPj3b2mWiK8W+zbvA1Gy70rS969fgo+PmYv7Lwt8sy+JrHWvdFUFbxK4ha9aETvvbkQ173l/T4+YnQgvZmA1773bM+9U5EDvFzvQr3EMxC+K8HPvUhMT76feE69978LPucL2L0xWtm7i0pDvb/LL750Udi9x91NvthaUb07Qdg9ho3gvWs7q7tltEi9OKJPvr9yTz1okHY+Mtw7vec9jb5hLGM9+dU9PXZ1Ur1CqAM7V/hmPeSNF75US1K9TVyOPoLYWj07Sa++PoQ7vebZDT/LzD49/iMWvssfDr10S30+6skyPV6dSD2juPO8taNlvQ/NNj2kMxW+Jej8vBWJaD5p3So9rClMPX6z17yt9Jm9ufIuPSJeFL6BBOS83hpWPicUIz2C4K2+xMLBvE9Y/D4mQgc9pJ0TvlYFYrxSckU+9eX2PKCbrb6D1iK8JFX2Pv9XvzzNRBO+QuyluWrEPT7fx6c8vylSPWUpXjuoFdy98C+wPE5QE755naI6G8I+PvidmDyAka2+Mr2iO9pu9T6AJkI8bGcTvlhybjwMxEA+KvsSPKC1rb7cEJY8o5H4Pms6Dzs14hO+n5vlPE1mSz5aQDi6LAuuvnATAz2MAQA/qs71u03CFL6xCSw9NdFePquBKrzWn0k9/dw9PX0afL1nYBq8AAwWvjjSOD3TR3s+PmRKvJA9r75x7Ew9JU0NP9JFnbzqvQm/2SN6PVMnXD9ybfW8fAiwvmtLoD1WTBY/CuEWvdJ3Gb6hV7g9k9+jPhEoI73upzM9wnPFPY3CaT06kB+9eS1zPi/Kxz0FmFC+8xsMvXO23D4ucr89UZ3tvh6X0byV8B8/1m+sPQ/VOb8sdVa8lm/bPiW0jj2o39C+mAmUuzIhbj7U/Hs9Dc/AvVOyCzlNfdo+d0Z0PWPNu74ZBA48RlxsPhU6Vj23EmW9v6ZZPPSt2T47pVE9Ss+pvq57sjwK2Wo+03k2PSrBvrwLD9g8uKwJPX6RND1P9JA+1ZDdPF6NaT7Swks9HoOYO5F3AT2PNwQ9biRMPToOoD6EHAQ93hhoPk7AZT3JqRM93q0WPWWE1z5XtGg9jyR0vnMpOT2hfR0/TCxVPW09A78Xj2s9abnWPh8tKz0n71C+GfWGPXgnHT8mdho9hWz3viIaoD3iJNY+VL/lPOo3N77OO7E9hBVkPrRuyDzT0vo9Ylu6Pb3C1T6Sf9w8IEomvjR1yz0vVWM+WeTBPKv8DT4XjdQ9J2TVPiOc2DyZ+hW+WJ/lPSKMHD8AncA8RYPcvoar/j1RaU4/MBl0PCJDN78R2Q8+jieAPxhdGDpTfYC/R1okPhZbTj+LtJ+8awE2v3HcND6eeRw/KhgKvetU2b4NYUE+4kzVPhPeLL0pCBK+P+lJPp9wYz7OjDi9wb0LPr11Tj4A8NU+6F4tvYUqLr52BFc+fREdP9RNO73ZnfO+OJVjPl2U1j5dSGK94KJKvoQqbD7DNGY+Wn5yvehtnT0rxXA+v436PEcybL3PWrM+hmVxPsvpZz7of0+9894jPesIdj6jjQM94zhMvU/woT5PsXY+hScmvuFPMr0+xBc/mV5zPt77CD0pvwG9DNOSPvANdD6Zf2o+d4LUvGzpgLyRvng+TVbZPn4W17zyCqK+DbiAPh9Daz6FeAW9gOUDvVISgz7JzQ891BsIvUgDgD5bboM+qTlsPtlA57xk41i9GMuFPoxiEz3I7e+81zpsPmsphj4eEW0+zyHKvKyTkb1PiIg+JogWPTnH1bwC2Fo+puiIPkvPbT5hw7K8Z1myvXFJiz75Uxk9+QfBvNdmSz6Tq4s+R3luPqN8oLw9pM+9EQ6OPvrYGz0jGbG89309Ps9xjj6aE28+i8eSvEA86r3Y1ZA+KSgePa+EpbwYvzA+ETuRPliibz4mPYm8xmoBvoegkz79UCA9EvKdvIrUJD4hB5Q+PilwPqCSg7yJCg2+8W2WPs9hIj2sI5q8/G4ZPt7Vlj7Kq3A+CZeBvHpKGL78PZk+MWgkPd
/0mbxqQw4+NaeZPlItcT7EMYO8RHQjvp8QnD5Am9w+2VidvDUl6r4geqA+/7BxPghG6LyE2C6+2+SiPi7z3D7bHwK9TMnxvh5Qpz6fkXI+bc8ovR9KQr4Yvak+LJEtPXpaOL36orc9LSyqPlG5G74JAjG9bVK8PoadqD78uTI9XuASvbo9fT3pD6k+Yv50PsbPDb38wXe+GIOrPmPZNj3aoSG9qUEiPR74qz69EnY+GWMevWHPh74Qbq4+N2s7PeEdNL3Jh3U8A+auPu9GGL6a4zK9mEKWPi9grT5sta++8tgavU9zEj+O3Kk+VBcXvnT317wBE4k+w1moPhlAr75VGqy8SE4NP3rYpD7Sfwm/l1UjvPSEVj98WJ8+9QavvjmA3jtRywo/WdibPg5ZFr4ZdJA8iNWAPnVXmj5bx0Q9MK65PKgpEb1l1Zo+du8WvrnfszxEU4c+AFOZPqAqQj2RLd88hhCvvETPmT77phe+PK3bPKE/jz4JS5g+ngA/PRTCBD0Zro27R8WYPoQQdz5nZwQ9H76SvsM9mz58Njs9ptnZPGnWgzyUtZs+tGEZvqh83Dz0U6I+7CyaPsUUOD1INwg9hw0HPbyimj6iQRq+wOoKPdUCrD7XF5k+xiY0PVNwJj0s2F09I4uZPgQ5dD4r4Co99NZmvlj8mz6HVS89kmgYPZ8TpD2PbJw+kRJzPrf4Hj30aU2+09qePtEk3T7XiQ49Pir2vhVHoz51/3E+2E3OPFOaNb6YsqU+ZlUnPWQ/sTxVQvw9sB2mPmpQcT6obcU8FoImvnSHqD7yYNw+fMmqPGoq5b7L76w+LqhwPnboQjxR+Be+lIwFbnVtcHmUjAVkdHlwZZSTlIwCZjSUSwBLAYeUUpQoSwOMATyUTk5OSv////9K/////0sAdJRiS8hLBIaUjAFDlHSUUpQuAAAAAA==", "action_prob": [0.3526957333087921, 0.14588908851146698, 0.9279260039329529, 0.8642497658729553, 0.6950178146362305, 0.6099445819854736, 0.7067097425460815, 0.5995811223983765, 0.7148560285568237, 0.4066804349422455, 0.8370143175125122, 0.6136423349380493, 0.7033019661903381, 0.626803994178772, 0.6934657096862793, 0.3556269109249115, 0.8688167333602905, 0.6879609227180481, 0.36533549427986145, 0.8508206009864807, 0.6159877181053162, 0.7379282116889954, 0.5787248015403748, 0.7705727815628052, 0.4693235456943512, 0.8079946041107178, 0.5041186809539795, 0.7921157479286194, 0.5371452569961548, 0.22471413016319275, 0.9074897170066833, 0.7715845704078674, 0.5683070421218872, 0.7613142728805542, 0.41411346197128296, 0.8545545935630798, 0.37226608395576477, 0.87298184633255, 0.3204433023929596, 0.892001211643219, 0.7374461889266968, 0.59306401014328, 0.7798377871513367, 0.47273099422454834, 0.8156664967536926, 0.4795115292072296, 0.8371322154998779, 0.5875725746154785, 0.25028878450393677, 0.09666867554187775, 0.9492496252059937, 0.906002938747406, 0.750521719455719, 0.613213837146759, 0.7475426197052002, 0.6159107089042664, 0.7462193369865417, 0.6163398027420044, 0.7465018630027771, 0.6145813465118408, 0.7483493685722351, 0.6106396317481995, 0.7517302632331848, 0.395553857088089, 0.1433952897787094, 0.9358101487159729, 0.8732455372810364, 0.325672447681427, 0.8862518668174744, 0.2834150493144989, 0.8998479843139648, 0.7622450590133667, 0.5651187300682068, 0.7938673496246338, 0.5091937780380249, 0.5167255997657776, 0.822179913520813, 0.548430323600769, 0.22651681303977966, 0.9097132682800293, 0.7622870802879333, 0.6056082844734192, 0.7432963848114014, 0.6309908628463745, 0.2753540277481079, 0.8985282182693481, 0.2735895812511444, 0.9011515378952026, 0.7388026118278503, 0.6241142153739929, 0.2573023736476898, 0.9077515006065369, 0.23462699353694916, 0.9152626991271973, 0.2052493542432785, 0.9238213300704956, 0.8274386525154114, 0.4461662769317627, 0.14936786890029907, 0.06195663660764694, 0.9619399309158325, 0.9466362595558167, 0.9081125855445862, 0.799969494342804, 0.538242757320404, 0.24238047003746033, 0.8970289826393127, 0.7285022139549255, 0.6437066197395325, 0.6920600533485413, 0.6815974116325378, 0.6583442687988281, 0.28890737891197205, 0.8932166695594788, 0.25030797719955444, 0.9042867422103882, 0.7867713570594788, 0.49777668714523315, 0.7946602702140808, 0.5209549069404602, 0.7871285676956177, 0.4689597189426422, 0.8232669830322266, 0.44756776094436646, 0.8328958749771118, 0.5736610293388367, 0.23976729810237885, 0.09205390512943268, 0.952098548412323, 
0.9221673607826233, 0.8314427733421326, 0.5761308073997498, 0.7505484819412231, 0.3738429546356201, 0.8780505657196045, 0.6957733035087585, 0.3589388132095337, 0.8582263588905334, 0.4041714668273926, 0.15794162452220917, 0.9275492429733276, 0.8371842503547668, 0.5470552444458008, 0.8010272979736328, 0.4985509216785431, 0.8025622963905334, 0.5279192924499512, 0.7874886393547058, 0.5539290308952332, 0.7726758122444153, 0.5771138072013855, 0.7580527067184448, 0.5980157256126404, 0.7434846758842468, 0.6171480417251587, 0.7287819385528564, 0.6349784731864929, 0.7137050628662109, 0.6519235372543335, 0.6979685425758362, 0.6683480143547058, 0.6812408566474915, 0.3154350519180298, 0.8865808248519897, 0.2822732925415039, 0.8968557715415955, 0.7556546330451965, 0.44990891218185425, 0.8108873963356018, 0.5221246480941772, 0.797235369682312, 0.47841259837150574, 0.8181584477424622, 0.569139301776886, 0.26748043298721313, 0.8876324892044067, 0.2711043357849121, 0.11027148365974426, 0.9452783465385437, 0.9027324914932251, 0.7765218615531921, 0.5241576433181763, 0.794934868812561, 0.49170902371406555, 0.8148983120918274, 0.5471991300582886, 0.770397424697876, 0.4310435950756073, 0.8436997532844543, 0.3905666470527649, 0.8622559309005737, 0.6562315225601196, 0.6873596906661987, 0.6870216727256775, 0.3421062231063843, 0.8696892857551575, 0.6479174494743347, 0.7160850167274475, 0.37141895294189453, 0.860981822013855], "advantages": [-5.805177688598633, -7.171714782714844, -7.50447940826416, -9.018877983093262, -9.317327499389648, -8.29710578918457, -10.692469596862793, -9.70886516571045, -12.131909370422363, -11.181135177612305, -8.599435806274414, -12.267099380493164, -14.794938087463379, -13.751933097839355, -16.2932186126709, -15.230537414550781, -12.382567405700684, -16.213668823242188, -18.843955993652344, -20.248062133789062, -20.56816291809082, -19.29852294921875, -21.902080535888672, -20.527822494506836, -23.156572341918945, -24.566425323486328, -24.704952239990234, -26.109140396118164, -26.212936401367188, -27.61455726623535, -28.09249496459961, -29.402257919311523, -29.52312469482422, -30.908702850341797, -31.026498794555664, -29.623170852661133, -32.17559051513672, -30.59762191772461, -33.21186828613281, -31.408525466918945, -34.087188720703125, -35.544185638427734, -35.202537536621094, -36.680118560791016, -37.20357894897461, -38.09786605834961, -37.558563232421875, -39.078208923339844, -39.642520904541016, -39.63841247558594, -39.3078498840332, -41.7000732421875, -43.37665557861328, -44.12235641479492, -44.83919906616211, -45.58688735961914, -46.33348846435547, -47.08747100830078, -47.86366653442383, -48.628150939941406, -49.43378448486328, -50.21290969848633, -51.0478401184082, -51.8458251953125, -51.456478118896484, -49.496952056884766, -52.27008056640625, -54.05083465576172, -53.340553283691406, -55.1651611328125, -54.28925323486328, -56.1627082824707, -57.1577033996582, -57.4947624206543, -58.560482025146484, 16.70970916748047, 16.96590232849121, 16.588821411132812, 17.302526473999023, 19.09515380859375, 16.693416595458984, 15.744462013244629, 16.35842514038086, 15.438992500305176, 16.029354095458984, 17.680248260498047, 15.462859153747559, 17.01536750793457, 14.911099433898926, 14.134074211120605, 14.561488151550293, 16.000106811523438, 14.017610549926758, 15.378988265991211, 13.485238075256348, 14.789929389953613, 12.968857765197754, 12.260245323181152, 12.589587211608887, 13.902763366699219, 16.11175537109375, 13.3578519821167, 11.639630317687988, 10.878172874450684, 10.739867210388184, 
11.25317096710205, 12.947202682495117, 11.02008056640625, 10.254191398620605, 10.72831916809082, 10.028216361999512, 10.469204902648926, 9.838679313659668, 10.081477165222168, 9.609055519104004, 9.982455253601074, 9.4464111328125, 9.749494552612305, 11.156461715698242, 9.840692520141602, 11.266512870788574, 10.023856163024902, 9.963217735290527, 10.126771926879883, 10.223444938659668, 10.309179306030273, 11.596709251403809, 14.647881507873535, 20.116416931152344, 15.7312650680542, 13.200026512145996, 12.58830738067627, 13.588249206542969, 12.909220695495605, 13.86764144897461, 13.633208274841309, 14.93297004699707, 16.823040008544922, 15.00060749053955, 16.80690574645996, 18.335186004638672, 16.48262596130371, 14.720860481262207, 13.699496269226074, 15.019783973693848, 16.53319549560547, 14.939926147460938, 16.346616744995117, 14.816868782043457, 16.119924545288086, 14.651726722717285, 15.855266571044922, 14.445880889892578, 15.554400444030762, 14.200718879699707, 15.218750953674316, 13.917484283447266, 14.849333763122559, 13.597264289855957, 14.446856498718262, 13.24090576171875, 14.011672019958496, 12.848993301391602, 12.13128662109375, 12.574131965637207, 11.975226402282715, 12.226496696472168, 12.608736991882324, 12.938835144042969, 11.88167667388916, 10.950910568237305, 11.229426383972168, 10.345603942871094, 10.540698051452637, 10.757345199584961, 10.82253360748291, 9.876598358154297, 9.960009574890137, 9.961868286132812, 9.089656829833984, 8.151199340820312, 7.044088840484619, 7.431649208068848, 6.314099311828613, 6.723358631134033, 5.597305774688721, 4.487648963928223, 4.9612650871276855, 5.372627258300781, 4.250913619995117, 4.6903300285339355, 3.5701632499694824, 2.4009199142456055, 2.967733144760132, 1.801228642463684, 0.9750916361808777, 1.269266963005066, 1.7423484325408936, 0.6158199906349182, -0.165374755859375]}
+{"type": "SampleBatch", "eps_id": [1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1730362651, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738], "obs": 
"BCJNGGhAFg0AAAAAAABQFg0AgIAFlQsNAAAAAAAAjCByYXkuY2xvdWRwaWNrbGUuY2xvdWRwaWNrbGVfZmFzdJSMEV9udW1weV9mcm9tYnVmZmVylJOUKJaADAAAAAAAAMvvrD4uqHA+duhCPFH4F77gV68+hiTcPhRHEjyv59++Ab+zPitacD7Xnj45CDwRvk4mtj7BLSI9g/wtuxSIGj4ajrY+bWNwPhqDvjk+CBK+f/W4Pg5QIj1xGyO74MoZPmBduT5ia3A+Hv0GOsC3Er7ZxLs+mm0iPTQNGrvlJxk+zSy8PkFycD6e8yc6VU8TvliUvj7yHtw+mpESu7Rn375d+8I+KnhwPhefM7xd0xO+92LFPnU33D737GK854bhvnrKyT7oGyA/p6G5vLvMPL/+MdA+cSNSP007Gb3Vw4S/z5nYPspRID+DM269NatBv3sD3z6qK90+bhaWvdoi977gb+M+ZqtzPsfbqb2fG1u+q9/lPh+xND1xn7K9BLFSPU9T5j7yI3Y+E4SwvantiL5uyeg+L8k+PV14u73jiE27iEPpPn2mFr5Ambu9kMWEPt7B5z6/c0k9FvqwvfgAeb3MQug+PhIUvol3s719TlA+vMfmPj2XUz16Iqu9Eq3svSdP5z7ulRG+Qt6vvXw4GT502uU+7wOsvke9qb07/NM+vGniPhMhD77Sx5i9hHXFPVP74D6b4aq+1NSUvdu0uj5pkN0+NfsMvhXlhb1EtUw9gCfcPijiqb4H2YO9YIakPrLB2D4oFgu+Hl9tvTmOFDyjXdc+Kf+ovvegbL2E2pA+X/zTPhs6Br/Dc1W9tYwOP+Odzj6RMqi+GdYnvesnfj63QMs+7OQFv/yAE70bFgc/pOXFPuaip75rjdC8oD5lPleLwj7WGge+j9+rvIQlnb15McE+NkynvutxuLzkR1Y+6di9Puh3Br76KJa8CTm5vauAvD7r/6a+V/qkvOMdST6iKbk+0ecFvpnMhLwlDdK91dK3Pv+7pr5zmpW8TWY9Pid9tD4UQAW/SJluvE+T8T6sKK8+6iU3vyH7p7vLh0I/PNWnPswtBb8sAiU8M2LuPnuBoj5ph6a+eMmePOtTND7aLJ8+CkkFv7KjuzyjHfM+A9iZPmvWpr7ftwQ9iAFCPs6Blj4YfAW/HT0UPaP/+z7rKpE+dlanvvmOPD3WMFg+J9KNPhKTB76Q2k09jpSIvRR3jD4C1H09+2NIPRarr76IGY0+OwYJvptILD2SARG9v7qLPsm1qL4tYik9Jl6KPvRaiD5rPgq+uIU/PVw9FbwM+YY+5lmpvrHGPj08jZg++JWDPqGbC741L1c9xmumPJMwgj4EjG09P9lYPQC8gr6ayII+TyYNvl7uQz2QYVs9Q1+BPsGuZz2ZUUg97gVlvorzgT7xfIA+NP81PQgiAL9lhYQ+TA7kPo3+DD1ZZEe/CxWJPgTXfz65YJo8FX3zvv6jiz44El89RuwYPNA/Nb7CMow+LiwQvqrYvTuS6vI9rcGKPu4vXj0vygU8KF8wvuBPiz50YBC+oLOaO47s+z1F3ok+IG9dPT5R6ztdNyy+/WuKPgWNEL6oMno7Us0BPvD5iD6Nylw9GCzQO2CrKL4/h4k+suh+PpZySDumKOm+0BOMPmI9XD0tOMa76J8lvsSgjD7dpxC+DxwYvE0fBD5yLos+5CJdPTSp27vKkSq++buLPgFsEL6naSS84u39Pcnk+zpMZi09spfJut1RKztJbzU7r24cvmK9wrqF8ZY+v2OWuaOTLT3NhZA7/ZzZOr77EjqmeRy+WJyROx5qlz7biiO7HSyyvu21KTz1/RY/d+oavJqwHL5/fbU8Zs+ZPoAOTbx0Iys9n7XmPH1rcjyVXT+8ISpyPjci6TxMqYm+7L7ju9fSJz0AFb08ur0FPd7kyLsULh6+g27CPO9Aqj6KEBe8XBUlPbHp+DxrPUI9odsJvGT3Hr5aVwA9MPayPii6PLz/eCE9oPkcPen/iD0yzy+87ZZvPoB0Ij33k1q+C0jGu09l2z4G+BA9spr8vhsXJTuFfm4+tBrRPEBMQr5xLus7OfXaPkMEsjwU0/K+pNyAPL/NbT4ooEg8mgIzvhXppjymtto+pVcPPDte7b4X5uw8IX9tPucrCbreNyy++nIJPSzMFj1zu3671qP+PQ93DD1aDCK+d4a3um/i1D6fAP88kRsXPWGc4jtUOPs9+4UCPfMgIr4pgBk8isXVPiwb6zzfJxY9QSiRPB/gAj7EHPE8UBRtPuoYpjzODSO+xYULPWBK2j45Aow8FxLkvvNyLj1tiGw+Wg0GPMT3Fr4kX0E9wRvaPiF8qzuhAuC+3URkPSVWbD4bf2a77qASvgktdz1jVRI9MBfRu7vwFz5DGno9mSAjvo6yX7soyuA+ag1tPUroEj3N4a87r8UUPpX9bz0FKCO+UowHPLIa4T4k8GI9UxwSPbTOizz5Lhk+OtxlPf4TbD4ZUaQ8/fUMvhu/eD2h8A89V8ONPBgoJT4UoHs9KIhrPicwqDzK6AC+4zuHPcaD2T4HkJM8xPTSvpaimD1epB4/AR0gPPLvMr+RBLI9IFDZPlTaibs8dM6+I2fDPXifHj+YDkm8HX4yv1XI3D2Abtk+iMPWvFQf0b5VLe496XJrPmvXDL02PP69VJj3Pcje2T7JAhe9Fd3aviiDBD5XfWw+cAc6vZwmFr77PQk++IQVPYcKRr2UiQY+Xv0JPszbbT40Rzu9LWE0vjS/Dj5SAxs9YLVJvYtp0D2ehQ8+eksgvjxfQb1gzME+6VAMPtOmID1BXSK9OROSPYseDT6uhnA+coUcvagzb74J7hE+RTQlPUyoL70Zqj89gMESPh7ZHb760iu9q7qmPlGZDz7dKio9wCURvXsZpDwhcxA+KtlyPqeBD71cNpG+g04VPsCS3T6NvSa97E4Wv2wrHj7r8nM+ztZWvdp3nb5xDCM+ssozPbQIcL33gwK9k/IjPqGPdT7xpHK9ClWvvtnbKD45rDo9RlmHvTtYjb3Kyik+XWB3PvUsir3IesO+W70uPmx2Qj1i0Jm96ZXjvUW2Lz5wAha+n12evTW3ID44tiw+7FhLPeTvl70B6SK+gbotPvfNE74XdJ69C9bfPb/FKj4oMq2+DfqZvbjRwD442CM+n5QRvh2Nir33ono92e4gPvyWXD18C4i9PAyBvjQJIj4o238+Yl6SvZ6eEL8vJyc+I5hkPQCCqb2FVpe+yUsoPrNaDb5nnbW9IKT2vA14JT7936m+G9m2vXbubj6IrB4+9ogGv3JKrb2FPv4+QekTPrWcqL6G85i96782PqsqDT4vaQi+K6SRvf4XDL4+cAo+Ko2nvro+l726zAc+hLwDPvRvBb8j0JG9SF/NPnAf8j17fqa+HWKBvfzlsT2kzeQ92vMEv42me71Tyrc+6IfPPZSVpb6APl69M85CPb1Iwj2siAS/GFlavVsxpT4nFK09WMykvszqP73ZaV88FeWfPaorBL/UzD698RSVPmC/ij1yHaS+cPImvZGogbyaPHs9bNoDv1
0+KL2YBoc+MAtRPTmEo761oxK9MYIqvY3hNj3QkgO/tgwWvQ1MdT4Nxww9lvyivgdtAr1TBIS9PGblPPZSA7/htAe9XD1fPhpakTzsgqK+3bHrvNb0rb1Vsjo8MBkDv32c+bwlS0s+FiCXOgEUor6WFdm8hTDUvb6tqbv74wK/Ow/qvKbuOD4WYXy86ayhvmh4zLy8ufe9/+yxvM1g9r3TSeC8QcPXvtGixbwCS6G+jqoSvaHIDL7xP/m8PXYCv87tHb2lIxM+aF8mvTZENL9kKBK94xHZPtUOYL3LMAK/a9revGxF9j0G3IS9FQo0v8kmy7zk9s4+cKqhvRwAAr9D7Ii8BKXUPUF3tr1T+p++mtJvvIy8Rr6aQ8O9weIBv4y1l7xqacA9uQvYvY7EM7/0UIi8lu/CPgPP9L3SwQG/nd8TvPytqT3uyAS+z4ufvhJz8bv1xFm+ryoLvlFc7r0zaT68PfYBv+OMDb5kaJ++jWGyvN3pX7457RO+35IBvw011rzqW4k95EoevuMOn77wN8u8X1tvvqanJL4pYwG//4PxvMTuUD2AAS++LDozv4co6bxTIqs+E1g9vmUSZb85ZbK8bikeP3erT75mCzO/+VcavN//oj5M/l2+CfZkv4oYSLuJqRs/a09wvl9zi79FORU8rVdmP6lPg75l+2S/8wfePPAmHD9veIy+YyEzv/T7ID034qY+u6KTvhY4Zb+Crzs9YHkhP+/NnL5fcDO/eFtvPd6otD5k+6O+XrYBv6Uhhj2TGqI9pSupvjbjM7+eX4k9kJTIPrFdsL4UMgK/gGuZPXm39z3lkrW+sxOhvs9fnj2btxa+lIwFbnVtcHmUjAVkdHlwZZSTlIwCZjSUSwBLAYeUUpQoSwOMATyUTk5OSv////9K/////0sAdJRiS8hLBIaUjAFDlHSUUpQuAAAAAA==", "actions": [1, 0, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0], "rewards": [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0], "prev_actions": [0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1], "prev_rewards": [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0], "dones": [false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, true, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false], "new_obs": 
"BCJNGGhAFg0AAAAAAABQFg0AgIAFlQsNAAAAAAAAjCByYXkuY2xvdWRwaWNrbGUuY2xvdWRwaWNrbGVfZmFzdJSMEV9udW1weV9mcm9tYnVmZmVylJOUKJaADAAAAAAAAOBXrz6GJNw+FEcSPK/n374Bv7M+K1pwPteePjkIPBG+Tia2PsEtIj2D/C27FIgaPhqOtj5tY3A+GoO+OT4IEr5/9bg+DlAiPXEbI7vgyhk+YF25PmJrcD4e/QY6wLcSvtnEuz6abSI9NA0au+UnGT7NLLw+QXJwPp7zJzpVTxO+WJS+PvIe3D6akRK7tGffvl37wj4qeHA+F58zvF3TE773YsU+dTfcPvfsYrznhuG+esrJPugbID+nobm8u8w8v/4x0D5xI1I/TTsZvdXDhL/Pmdg+ylEgP4Mzbr01q0G/ewPfPqor3T5uFpa92iL3vuBv4z5mq3M+x9upvZ8bW76r3+U+H7E0PXGfsr0EsVI9T1PmPvIjdj4ThLC9qe2Ivm7J6D4vyT49XXi7veOITbuIQ+k+faYWvkCZu72QxYQ+3sHnPr9zST0W+rC9+AB5vcxC6D4+EhS+iXezvX1OUD68x+Y+PZdTPXoiq70Srey9J0/nPu6VEb5C3q+9fDgZPnTa5T7vA6y+R72pvTv80z68aeI+EyEPvtLHmL2EdcU9U/vgPpvhqr7U1JS927S6PmmQ3T41+wy+FeWFvUS1TD2AJ9w+KOKpvgfZg71ghqQ+ssHYPigWC74eX229OY4UPKNd1z4p/6i+96BsvYTakD5f/NM+GzoGv8NzVb21jA4/453OPpEyqL4Z1ie96yd+PrdAyz7s5AW//IATvRsWBz+k5cU+5qKnvmuN0LygPmU+V4vCPtYaB76P36u8hCWdvXkxwT42TKe+63G4vORHVj7p2L0+6HcGvvoolrwJObm9q4C8Puv/pr5X+qS84x1JPqIpuT7R5wW+mcyEvCUN0r3V0rc+/7umvnOalbxNZj0+J320PhRABb9ImW68T5PxPqworz7qJTe/Ifunu8uHQj881ac+zC0FvywCJTwzYu4+e4GiPmmHpr54yZ4861M0Ptosnz4KSQW/sqO7PKMd8z4D2Jk+a9amvt+3BD2IAUI+zoGWPhh8Bb8dPRQ9o//7PusqkT52Vqe++Y48PdYwWD4n0o0+EpMHvpDaTT2OlIi9FHeMPgLUfT37Y0g9FquvvogZjT47Bgm+m0gsPZIBEb2/uos+ybWovi1iKT0mXoo+9FqIPms+Cr64hT89XD0VvAz5hj7mWam+scY+PTyNmD74lYM+oZsLvjUvVz3Ga6Y8kzCCPgSMbT0/2Vg9ALyCvprIgj5PJg2+Xu5DPZBhWz1DX4E+wa5nPZlRSD3uBWW+ivOBPvF8gD40/zU9CCIAv2WFhD5MDuQ+jf4MPVlkR78LFYk+BNd/PrlgmjwVffO+/qOLPjgSXz1G7Bg80D81vsIyjD4uLBC+qti9O5Lq8j2twYo+7i9ePS/KBTwoXzC+4E+LPnRgEL6gs5o7juz7PUXeiT4gb109PlHrO103LL79a4o+BY0QvqgyejtSzQE+8PmIPo3KXD0YLNA7YKsovj+HiT6y6H4+lnJIO6Yo6b7QE4w+Yj1cPS04xrvonyW+xKCMPt2nEL4PHBi8TR8EPnIuiz7kIl09NKnbu8qRKr75u4s+AWwQvqdpJLzi7f09QUqKPmkdXj1tkfe7nfcvvklvNTuvbhy+Yr3CuoXxlj6/Y5a5o5MtPc2FkDv9nNk6vvsSOqZ5HL5YnJE7HmqXPtuKI7sdLLK+7bUpPPX9Fj936hq8mrAcvn99tTxmz5k+gA5NvHQjKz2fteY8fWtyPJVdP7whKnI+NyLpPEypib7svuO719InPQAVvTy6vQU93uTIuxQuHr6DbsI870CqPooQF7xcFSU9sen4PGs9Qj2h2wm8ZPcevlpXAD0w9rI+KLo8vP94IT2g+Rw96f+IPTLPL7ztlm8+gHQiPfeTWr4LSMa7T2XbPgb4ED2ymvy+GxclO4V+bj60GtE8QExCvnEu6zs59do+QwSyPBTT8r6k3IA8v81tPiigSDyaAjO+FemmPKa22j6lVw88O17tvhfm7Dwhf20+5ysJut43LL76cgk9LMwWPXO7frvWo/49D3cMPVoMIr53hre6b+LUPp8A/zyRGxc9YZziO1Q4+z37hQI98yAivimAGTyKxdU+LBvrPN8nFj1BKJE8H+ACPsQc8TxQFG0+6himPM4NI77FhQs9YEraPjkCjDwXEuS+83IuPW2IbD5aDQY8xPcWviRfQT3BG9o+IXyrO6EC4L7dRGQ9JVZsPht/ZrvuoBK+CS13PWNVEj0wF9G7u/AXPkMaej2ZICO+jrJfuyjK4D5qDW09SugSPc3hrzuvxRQ+lf1vPQUoI75SjAc8shrhPiTwYj1THBI9tM6LPPkuGT463GU9/hNsPhlRpDz99Qy+G794PaHwDz1Xw408GCglPhSgez0oiGs+JzCoPMroAL7jO4c9xoPZPgeQkzzE9NK+lqKYPV6kHj8BHSA88u8yv5EEsj0gUNk+VNqJuzx0zr4jZ8M9eJ8eP5gOSbwdfjK/VcjcPYBu2T6Iw9a8VB/RvlUt7j3pcms+a9cMvTY8/r1UmPc9yN7ZPskCF70V3dq+KIMEPld9bD5wBzq9nCYWvvs9CT74hBU9hwpGvZSJBj5e/Qk+zNttPjRHO70tYTS+NL8OPlIDGz1gtUm9i2nQPZ6FDz56SyC+PF9BvWDMwT7pUAw+06YgPUFdIr05E5I9ix4NPq6GcD5yhRy9qDNvvgnuET5FNCU9TKgvvRmqPz2AwRI+HtkdvvrSK72ruqY+UZkPPt0qKj3AJRG9exmkPCFzED4q2XI+p4EPvVw2kb6DThU+wJLdPo29Jr3sTha/bCsePuvycz7O1la92nedvnEMIz6yyjM9tAhwvfeDAr2T8iM+oY91PvGkcr0KVa++2dsoPjmsOj1GWYe9O1iNvcrKKT5dYHc+9SyKvch6w75bvS4+bHZCPWLQmb3pleO9RbYvPnACFr6fXZ69NbcgPji2LD7sWEs95O+XvQHpIr6Bui0+980Tvhd0nr0L1t89v8UqPigyrb4N+pm9uNHAPjjYIz6flBG+HY2Kvfeiej3Z7iA+/JZcPXwLiL08DIG+NAkiPijbfz5iXpK9np4Qvy8nJz4jmGQ9AIKpvYVWl77JSyg+s1oNvmedtb0gpPa8DXglPv3fqb4b2ba9du5uPoisHj72iAa/ckqtvYU+/j5B6RM+tZyovobzmL3rvzY+qyoNPi9pCL4rpJG9/hcMvj5wCj4qjae+uj6XvbrMBz6EvAM+9G8FvyPQkb1IX80+cB/yPXt+pr4dYoG9/OWxPaTN5D3a8wS/jaZ7vVPKtz7oh889lJWlvoA+Xr0zzkI9vUjCPayIBL8YWVq9WzGlPicUrT1YzKS+zOo/vdlpXzwV5Z89qisEv9TMPr3xFJU+YL+KPXIdpL5w8ia9kaiBvJo8ez1s2gO/XT4ovZgGhz4wC1E9OYSjvr
WjEr0xgiq9jeE2PdCSA7+2DBa9DUx1Pg3HDD2W/KK+B20CvVMEhL08ZuU89lIDv+G0B71cPV8+GlqRPOyCor7dseu81vStvVWyOjwwGQO/fZz5vCVLSz4WIJc6ARSivpYV2byFMNS9vq2pu/vjAr87D+q8pu44PhZhfLzprKG+aHjMvLy5973/7LG8zWD2vdNJ4LxBw9e+0aLFvAJLob6OqhK9ocgMvvE/+bw9dgK/zu0dvaUjEz5oXya9NkQ0v2QoEr3jEdk+1Q5gvcswAr9r2t68bEX2PQbchL0VCjS/ySbLvOT2zj5wqqG9HAACv0PsiLwEpdQ9QXe2vVP6n76a0m+8jLxGvppDw73B4gG/jLWXvGppwD25C9i9jsQzv/RQiLyW78I+A8/0vdLBAb+d3xO8/K2pPe7IBL7Pi5++EnPxu/XEWb6vKgu+UVzuvTNpPrw99gG/44wNvmRon76NYbK83elfvjntE77fkgG/DTXWvOpbiT3kSh6+4w6fvvA3y7xfW2++pqckviljAb//g/G8xO5QPYABL74sOjO/hyjpvFMiqz4TWD2+ZRJlvzllsrxuKR4/d6tPvmYLM7/5Vxq83/+iPkz+Xb4J9mS/ihhIu4mpGz9rT3C+X3OLv0U5FTytV2Y/qU+DvmX7ZL/zB9488CYcP294jL5jITO/9PsgPTfipj67opO+Fjhlv4KvOz1geSE/782cvl9wM794W2893qi0PmT7o75etgG/pSGGPZMaoj2lK6m+NuMzv55fiT2QlMg+sV2wvhQyAr+Aa5k9ebf3PeWStb6zE6G+z1+ePZu3Fr6cy7i+CcACv3dYmD0C6Sw+lIwFbnVtcHmUjAVkdHlwZZSTlIwCZjSUSwBLAYeUUpQoSwOMATyUTk5OSv////9K/////0sAdJRiS8hLBIaUjAFDlHSUUpQuAAAAAA==", "action_prob": [0.3704218566417694, 0.862652063369751, 0.6408324241638184, 0.7067926526069641, 0.6424024105072021, 0.704544723033905, 0.643830418586731, 0.7023808360099792, 0.35486194491386414, 0.867941677570343, 0.3351086974143982, 0.12471930682659149, 0.06076493486762047, 0.9595835208892822, 0.9443088173866272, 0.9071905016899109, 0.8101104497909546, 0.4039580225944519, 0.8424755930900574, 0.6691440939903259, 0.5873192548751831, 0.7227833867073059, 0.518470823764801, 0.7663354277610779, 0.5499677658081055, 0.6909266710281372, 0.6000205874443054, 0.6496092677116394, 0.6416495442390442, 0.6098460555076599, 0.6762430667877197, 0.4280563294887543, 0.7881345748901367, 0.44517654180526733, 0.7843468189239502, 0.5491093993186951, 0.720304012298584, 0.5353097915649414, 0.7302203178405762, 0.5228539109230042, 0.7390195727348328, 0.4885759651660919, 0.23121054470539093, 0.9006240367889404, 0.7968468070030212, 0.43280747532844543, 0.8218129277229309, 0.3880397081375122, 0.8501409888267517, 0.6692323088645935, 0.37749165296554565, 0.831940233707428, 0.5919114947319031, 0.7414047122001648, 0.5435774922370911, 0.7814743518829346, 0.5154067277908325, 0.7675591707229614, 0.5614597201347351, 0.25875985622406006, 0.11049404740333557, 0.9392894506454468, 0.8906344771385193, 0.7393347024917603, 0.5849305391311646, 0.7357144951820374, 0.5903993248939514, 0.7325564622879028, 0.5951409339904785, 0.27020227909088135, 0.890124499797821, 0.7414395809173584, 0.5737789273262024, 0.747101902961731, 0.5647081136703491, 0.43349379301071167, 0.8456490635871887, 0.422675758600235, 0.14891332387924194, 0.9358048439025879, 0.866979718208313, 0.6477257013320923, 0.7104734182357788, 0.3343040645122528, 0.8837423920631409, 0.3000265955924988, 0.8944603800773621, 0.737734317779541, 0.39631572365760803, 0.8532589673995972, 0.40593230724334717, 0.8532748222351074, 0.40211984515190125, 0.8582062125205994, 0.614926278591156, 0.25683698058128357, 0.9029576182365417, 0.24387361109256744, 0.907710075378418, 0.7756183743476868, 0.44150394201278687, 0.8408247232437134, 0.43454691767692566, 0.8470814824104309, 0.5849999189376831, 0.23604242503643036, 0.9088169932365417, 0.22439728677272797, 0.9132001399993896, 0.7937383055686951, 0.5279405117034912, 0.804244339466095, 0.49281221628189087, 0.1818321794271469, 0.9264438152313232, 0.16466377675533295, 0.9328151345252991, 0.8593114614486694, 0.37187713384628296, 0.8782414197921753, 0.686177134513855, 0.6617769002914429, 0.728885293006897, 0.3895016610622406, 0.8505775928497314, 0.5772995948791504, 0.786056637763977, 0.47462278604507446, 0.8140077590942383, 
0.49266523122787476, 0.17261965572834015, 0.9300323128700256, 0.8569923639297485, 0.3558591902256012, 0.8785298466682434, 0.29066774249076843, 0.8968467712402344, 0.7688615322113037, 0.5123347043991089, 0.8096652626991272, 0.567115306854248, 0.7342634797096252, 0.3765783905982971, 0.13958685100078583, 0.9334431290626526, 0.8842563033103943, 0.7608109712600708, 0.5036918520927429, 0.7659969925880432, 0.4404829740524292, 0.8249092102050781, 0.6225041151046753, 0.6757571697235107, 0.6640529632568359, 0.6374146938323975, 0.696735143661499, 0.6027605533599854, 0.7227198481559753, 0.5718508958816528, 0.7437239289283752, 0.5443801283836365, 0.7610464096069336, 0.5198507905006409, 0.7756580710411072, 0.49768534302711487, 0.7882869243621826, 0.4772900938987732, 0.7994855642318726, 0.4580875635147095, 0.19032010436058044, 0.9116104245185852, 0.8271104693412781, 0.6045768857002258, 0.7105083465576172, 0.6136308908462524, 0.7094883918762207, 0.3857044577598572, 0.8438200950622559, 0.6264723539352417, 0.7051078677177429, 0.37881338596343994, 0.15192367136478424, 0.9262439608573914, 0.8586941361427307, 0.3390149474143982, 0.8663967251777649, 0.6815566420555115, 0.35366547107696533, 0.8646475076675415, 0.33449050784111023, 0.12341172993183136, 0.9437460899353027, 0.8987603187561035, 0.23489360511302948, 0.9163520932197571, 0.8179803490638733, 0.4328829050064087, 0.8565827012062073, 0.6541871428489685, 0.6612712144851685], "advantages": [0.8562566637992859, 0.13267900049686432, 0.19159622490406036, 0.47047892212867737, -0.5777991414070129, -0.3338415324687958, -1.3717912435531616, -1.1614481210708618, -2.1901793479919434, -2.8175485134124756, -3.016653537750244, -3.589956521987915, -3.196915626525879, -0.936923086643219, -3.474667549133301, -5.0085225105285645, -5.810187339782715, -6.1565775871276855, -6.966286659240723, -7.394003868103027, -7.659208297729492, -8.722640037536621, -9.01205062866211, -10.068300247192383, -10.387497901916504, -10.76607894897461, -11.859007835388184, -12.2404203414917, -13.349700927734375, -13.737081527709961, -14.865225791931152, -15.260808944702148, -15.791305541992188, -16.885093688964844, -17.416196823120117, -18.52495002746582, -19.714616775512695, -20.119800567626953, -21.337295532226562, -21.749876022338867, -22.9976749420166, -23.41666603088379, -23.96413230895996, -24.559900283813477, -25.809267044067383, -27.0604190826416, -27.578529357910156, -28.857717514038086, -29.355125427246094, -30.662277221679688, -32.123741149902344, -33.61427307128906, -33.80275344848633, -34.068180084228516, -35.61397933959961, -35.84345245361328, -37.437870025634766, -39.15348815917969, -39.1553955078125, -40.92556381225586, -42.536808013916016, -43.56732177734375, -43.932865142822266, -44.057289123535156, -44.17548751831055, -45.95310592651367, -46.07291030883789, -47.88583755493164, -48.00776290893555, -49.85635757446289, -51.50632095336914, -51.687564849853516, -51.880245208740234, -53.752532958984375, -53.95876693725586, 16.800418853759766, 17.167081832885742, 16.44361686706543, 16.810150146484375, 18.162490844726562, 16.310836791992188, 15.591723442077637, 15.698098182678223, 15.32346248626709, 15.739047050476074, 14.946310997009277, 15.394089698791504, 14.573724746704102, 14.539593696594238, 15.436141967773438, 14.322662353515625, 15.242505073547363, 14.12369441986084, 15.080483436584473, 13.946696281433105, 13.799705505371094, 14.582615852355957, 13.400273323059082, 14.21284294128418, 13.019174575805664, 12.789155960083008, 13.546785354614258, 12.623613357543945, 
13.403199195861816, 12.484156608581543, 12.581799507141113, 13.71827220916748, 12.192377090454102, 13.367371559143066, 11.826988220214844, 11.387402534484863, 11.639986991882324, 11.143689155578613, 11.68420696258545, 13.833220481872559, 11.818547248840332, 14.190725326538086, 12.068867683410645, 11.512876510620117, 12.141493797302246, 11.625069618225098, 12.483038902282715, 11.46791934967041, 12.392973899841309, 14.363967895507812, 11.919318199157715, 10.838932991027832, 11.819147109985352, 13.793970108032227, 11.315160751342773, 10.1919527053833, 10.893885612487793, 10.449422836303711, 11.507760047912598, 10.366683959960938, 11.408164024353027, 10.329667091369629, 11.325005531311035, 13.12551498413086, 10.775116920471191, 12.52396011352539, 14.353045463562012, 11.462501525878906, 9.224157333374023, 8.7572021484375, 9.283324241638184, 10.888875007629395, 12.7832670211792, 14.483699798583984, 11.475472450256348, 8.645523071289062, 10.7061128616333, 12.696712493896484, 9.534913063049316, 11.67943286895752, 8.439910888671875, 10.71435546875, 7.443734169006348, 9.808943748474121, 6.577042102813721, 8.977127075195312, 5.871778964996338, 8.237143516540527, 5.350921154022217, 7.606163024902344, 5.018596649169922, 7.093194484710693, 4.855752468109131, 6.694051742553711, 4.824299335479736, 4.579370975494385, 5.330660343170166, 6.647513389587402, 8.090636253356934, 6.073055267333984, 7.258737087249756, 5.546675205230713, 4.932263374328613, 5.372824668884277, 6.054940700531006, 4.895090579986572, 4.693336009979248, 5.598761558532715, 5.041888236999512, 4.79701566696167, 5.10745906829834, 4.651683330535889, 4.441667079925537, 4.534855842590332, 3.645660877227783, 3.6229147911071777, 3.9138808250427246, 2.6262741088867188, 1.9431759119033813, 1.8512492179870605, 1.186320185661316, 1.0952794551849365, 0.5830588936805725, 0.45987290143966675, 0.6946868896484375]}
+{"type": "SampleBatch", "eps_id": [1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1906370738, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767], "obs": 
"BCJNGGhAFg0AAAAAAABQFg0AgIAFlQsNAAAAAAAAjCByYXkuY2xvdWRwaWNrbGUuY2xvdWRwaWNrbGVfZmFzdJSMEV9udW1weV9mcm9tYnVmZmVylJOUKJaADAAAAAAAAJzLuL4JwAK/d1iYPQLpLD5+Br6+Xi6ivhFDnz3Cssu93ETBvjyQ+70iMJs9Rbe7vtyGwr7mTKO+tiuMPXAbUb31ysW+P7v/vWUUij3Ui6S+SxLHvsRKpL761Hk9D6oGvHhbyr6XTgS/myh5PaErmz5Lps++Yy6lvjL+iD00K/c8BfTSvi/FBL+SOoo9OravPpZD2L4o8za/JkmYPasHKD/+lN++XUgFv6grsz0hp8Y+z+nkvtNZp74REMM90DX+PaVC6L68agi+nyXIPdpXDL7fn+m+xMCovoKIwj3WOT0+4//svgc3C74uGso9AqKcvUdk7r4eCmw9OPjGPetFrL42ze2+vqKAPhEwuT1a1Ri/mTrrvhExYT0DvKA9sPWNvnqq6r73jhC+rWCVPWkqID2LHOy+b2hYPbP6lj3IN2u+C5LrvrGyEr4Sko09/qyuPZcJ7b6vGlA9aRCRPfU9Pb5nhOy+VL0UvpJ+iT03dQQ+LQHuvnASSD3wyo49dM8QviGB7b6su3g+FQCJPbaX0r5gBOu+SRpAPUpOcD3NWMm9bonqvnmWGL5/QGg9oWFZPg4Q7L4uXjk9d6R5Pa/jfb1rmeu+RDN1Po2QdD0tb6u+tCXpvnlRMj2bIlk9WhLEvJWz6L4JkXM+qSxXPSBPmb4NROa+pCUsPR+lPj0PHRk84NXlvqggcj4baT89MF6Jvghq477rsCY9hG4pPQK6Hj1Z/+K+lNhwPjKbLD16X3a+yJbgvk3OIT165Rg9BEWFPTov4L5DsG8+KToePf7GXL6gyd2+u1wdPaSQDD1iS7Y96mTdvv2fbj5W2xM9HEZFvggC275QPhk9KhMEPbu74z31n9q+taBtPigvDT1aPy++oT/Yvox02j4sVP48Ps/nviPh076lq2w+Via0PAYPGr5Dg9G+jTMSPReAmzw/qxg+sSXRvloSbD5p7bM8StgMvlnJzr6vzg89aGSdPNLfJT5Qbc6+QXhrPp7utzyGIP+9gxLMvhB42T6dhaM8PffRvhK5x76hnB4/cKpAPB9IMr/iYMG+Hz3ZPq4jDruN08y+nwi9vke0aj6snya8/Erdvcevur4SVNk+1QdKvI3Pzr4PV7a+gAhrPtoxp7yW1+u9X/2zvpUiDj3mD7q8LB8vPmiis75J7iO+6QqevL+z6T4SRrW+aJcQPRCEJrzPiCE+iOm0vqgZbD5UpuW7NXANvh2Nsr5qmBE9yhUgvBv/Gz7vL7K+6kIjvh1V3Lv5Q+I+4tGzvu17tb4kkwo72oM7PxVzt757MSO+21SJPH2E4T7bFLm+WnURPUF/0Tx5yRw+w7e4vv/Iaz5Aleo8PowGvidctr4zSg49Kw7VPJBELj4WAba+iPxqPi7w8Dyi2Om9haezvngs2T4CO948s33LvpdPr77PKGo+Bh2dPP9Jxb0l+Ky+udbYPoxUjTzqC8S+7qGovotOHj/TMB08fYsrv95Mor7BpNg+Aox5u4myv76n952+sYlpPrUSOby/1qm9zKGbvtvA2D5OP1S8iCDCvgVMl77WXx4/iz6ovF4HLb9E9pC+1f7YPsp9C73qjMe+P5+Mvn+RHj9gayu9d2Yxv0pw+DrO5co8yy3IuVC9Iz3Vrhw7NixhPof+2jmpcoG+tnPeO8jkyjyTAZi7JMkjPfmu7jsBPGE+05l7u8Iggr6waj88L4zUPp8uErzYTAy/PrmjPHlrYT4X4qK8XDGEvnDKxzxZSs88Uy/NvNOo5jzE78s8S4QtvlmSyLykK6A+hSywPEQX1TwvUZW8rEtNPItvtDxk5Sy+oUOTvOBLmT65xZg82FTZPC1rRLxqZJE6dR6dPNd1LL4gDkS8AnmUPnyGgTwxI9w8uBDKuzZC07uW7YU8UzIsvl1KzrunjZE+1cBUPOeY3TyX2x+6YAgqvP2dXTx0g2M+rERWugJEm77yNZM8rsPdPAmG4bvIaDG8YaWXPDSgYz61nui7Y4Gcvu8QvDzoZt88NHlYvAGtebzAiMA87etjPot3Xbx7xp++awDlPEaI4jyR3KG8COvBvESI6TzhZ2Q+bb2lvKwhpb7iCQc96zbnPACV2ry0ohS9y1kJPYAWZT4Hh+C8ea6svoOtGz2ai+08jeQLvduIWr2hDR49+JcpvnJDEL2U12k+WnwQPRWq9Ty5HPu87wyavUDxEj3p8mY+17cDvXU8wb4VayU97QL9PMuiIr1MpcK9y/InPceSJ773ayq9bEo9PuSKGj2hQgM9Skcbvbob973yKh09jydpPq4pJb38oNm+89EvPZPUBz3B+0e9YdcUvmaJMj0GGSW+B+RTvWSxBj4zVCU9mQ22voQdSb2intA+TTMIPfufI750vCe9HDXMPYc49jzWBRM9X5EfvVaKUr4LGvw8CnUivjxpML1np5g9yxviPELNtL4PTiq9Vue0PoNAqDwlOCG+P1wNvVjoQz34dI48QUwcPTNxCb0e1oK+drWUPGNabz5CYB69YwEPv10Buzw0gCA9QyNMvT+Fjr7kbME8rOoevufwYr3iYuy6qf+nPLbusr65FmO9F7mLPgd7XTxFNAu/q7tMveQuDD9sMi07USqyvt3fH71lVnU+/nONu7/iCr9aPwy9jgsFPxKAeLyIobG+l1jDvAuhXT6RF7W8aq0Kv6nin7xhYgA/POwGvXhQsb5bcBu895hPPghLI72prxq+7wOyu1hRvr0Bqy+9mDSxvsTq7rvMyUo+VgVMve2GCr/eQ1q7zRD6PnlZeL2wILG+kvPSO9xZRz5QWIq9nosavp9EKTwPisS924aQvQpAsb5f0gk8ysFMPvG0nr3W0Rq+EFhLPB9xuL1L5qS9+GaxvlPVLTwveFM+fhezvRQoG77ngHE8MJWpvUtMub1ZlrG+y15WPPujWz5Igce9UZAbvuBTjjxQn5e9QbrNvUvtLz2mMoI8Te24vuL3y73+DBy+3QoOPPcdgr3XNdK9rP2xvopy8jvRb20+GHPgvTZJHL4XNEU8MnxvvXaz5r36IbK+cAsyPCCzcz6f8/S9Cp8cvqgDgDza6FG9bDf7vXjNKz1ePG88FIytvpx/+b0pinI+ZioAPDaDIL8AzO+94H4qPbWUmrtT6qm+iBfuvaUcHb5ICTq8IpomvVxg9L3EX7K+S11HvMJdfj67UgG+fcQcvjnv67uk+0S9YXUEvrqRLD3SuQW8m6Kvvn6YA77uihy+5CF2vA3XWL39uQa+7uotPWO9g7ytXbO+YNsFvmgWHL4LI7280YWAvYv6CL6RMTA9MGvHvO6qub4EGQi+yGQbvotqAb18LZ+9oTQLvjlcsb6GyAe93b1RPs1MEr5Wchq+BwLuvPX2yL2RYxW+kxY3PckV/rzutsy+N3kUvo2RGb4AzB+94MvvvXyLF77FY7C+hGMpvd3yJj63mR6+DP0Jv2
gIHL0SgeI+uKMpvjbPr76Xle+8FkINPgKsML7UZRe+qPvYvJu/J74qszO+9GavvqLS87wHkfY9SLc6vnaTFr70GOC8puI5vju6Pb4T/K6+0db9vGS30T0TukS+klMJv9MP7bw0MsU+hbZPvt6Lrr6F9a28ofmqPeCxVr4VJQm/80egvPkgvT6aqmG+bj+uvgmFR7w9mZA95qJovpqDFL5HYjC8FlFnvkqba75yFK6+vmd6vGjJgT3ekXK+RyIUvq6jZbwZtm++UIh1vhl+Tz1qLJm8XfoHv7l+dL5KqBO+DDPwvBhMer67cne+CkFSPZwfDL3W1Au/m2V2voW7Er6Z3ji9QG2HvuBUeb6q7Ky+rolOvW/wXjzPH4C+KjgIv1FsTb0odpQ+spKFvnIwrL5VqzW9NFGUvE4Eib5G4Ae/Bic3veNAhT6tc46+t4mrvvbUIb1RKz299OGRvtORB7+CnSW9CWNvPi9Ol77+XTm/3XYSvYpwAz9YuJ6+Vi9rv6rO0LyBbUs/oyCovoQlOb8fOh286PP8PoqIr744KQe/CDWVOf0dSz6W8LS+/xw5vwdSiztMdfs+JVi8vm8tB7/tl2Y8lJVMPl3Awb6Yjqq+vAeUPPIVtb2eKcW+eUsHvxmLhTwk8lY+CZPKvgNOOb9I76c83vcBP4780b7RbQe/Rx37PDfaYj5ZZ9e+wC+rvpO0Dz1eLXu909PaviyrB7+Mrgo9hAh4PhJB4L7OsKu+RYYePUItIr0gsOO+2O4Hv+1HGz3ks4c+FCDpvjNArL5P/jA9Zwp9vAGS7L6PWxG+arovPaCflr4eBu6+beGsvuCgFz2x5T88lIwFbnVtcHmUjAVkdHlwZZSTlIwCZjSUSwBLAYeUUpQoSwOMATyUTk5OSv////9K/////0sAdJRiS8hLBIaUjAFDlHSUUpQuAAAAAA==", "actions": [1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1], "rewards": [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0], "prev_actions": [0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0], "prev_rewards": [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0], "dones": [false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, true, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false], "new_obs": 
"BCJNGGhAFg0AAAAAAABQFg0AgIAFlQsNAAAAAAAAjCByYXkuY2xvdWRwaWNrbGUuY2xvdWRwaWNrbGVfZmFzdJSMEV9udW1weV9mcm9tYnVmZmVylJOUKJaADAAAAAAAAH4Gvr5eLqK+EUOfPcKyy73cRMG+PJD7vSIwmz1Ft7u+3IbCvuZMo762K4w9cBtRvfXKxb4/u/+9ZRSKPdSLpL5LEse+xEqkvvrUeT0Pqga8eFvKvpdOBL+bKHk9oSubPkumz75jLqW+Mv6IPTQr9zwF9NK+L8UEv5I6ij06tq8+lkPYvijzNr8mSZg9qwcoP/6U375dSAW/qCuzPSGnxj7P6eS+01mnvhEQwz3QNf49pULovrxqCL6fJcg92lcMvt+f6b7EwKi+gojCPdY5PT7j/+y+BzcLvi4ayj0Copy9R2Tuvh4KbD04+MY960WsvjbN7b6+ooA+ETC5PVrVGL+ZOuu+ETFhPQO8oD2w9Y2+eqrqvveOEL6tYJU9aSogPYsc7L5vaFg9s/qWPcg3a74Lkuu+sbISvhKSjT3+rK49lwntvq8aUD1pEJE99T09vmeE7L5UvRS+kn6JPTd1BD4tAe6+cBJIPfDKjj10zxC+IYHtvqy7eD4VAIk9tpfSvmAE675JGkA9Sk5wPc1Yyb1uieq+eZYYvn9AaD2hYVk+DhDsvi5eOT13pHk9r+N9vWuZ675EM3U+jZB0PS1vq760Jem+eVEyPZsiWT1aEsS8lbPovgmRcz6pLFc9IE+Zvg1E5r6kJSw9H6U+PQ8dGTzg1eW+qCByPhtpPz0wXom+CGrjvuuwJj2Ebik9AroePVn/4r6U2HA+MpssPXpfdr7IluC+Tc4hPXrlGD0ERYU9Oi/gvkOwbz4pOh49/sZcvqDJ3b67XB09pJAMPWJLtj3qZN2+/Z9uPlbbEz0cRkW+CALbvlA+GT0qEwQ9u7vjPfWf2r61oG0+KC8NPVo/L76hP9i+jHTaPixU/jw+z+e+I+HTvqWrbD5WJrQ8Bg8avkOD0b6NMxI9F4CbPD+rGD6xJdG+WhJsPmntszxK2Ay+WcnOvq/ODz1oZJ080t8lPlBtzr5BeGs+nu63PIYg/72DEsy+EHjZPp2Fozw999G+ErnHvqGcHj9wqkA8H0gyv+Jgwb4fPdk+riMOu43TzL6fCL2+R7RqPqyfJrz8St29x6+6vhJU2T7VB0q8jc/Ovg9Xtr6ACGs+2jGnvJbX671f/bO+lSIOPeYPurwsHy8+aKKzvknuI77pCp68v7PpPhJGtb5olxA9EIQmvM+IIT6I6bS+qBlsPlSm5bs1cA2+HY2yvmqYET3KFSC8G/8bPu8vsr7qQiO+HVXcu/lD4j7i0bO+7Xu1viSTCjvagzs/FXO3vnsxI77bVIk8fYThPtsUub5adRE9QX/RPHnJHD7Dt7i+/8hrPkCV6jw+jAa+J1y2vjNKDj0rDtU8kEQuPhYBtr6I/Go+LvDwPKLY6b2Fp7O+eCzZPgI73jyzfcu+l0+vvs8oaj4GHZ08/0nFvSX4rL651tg+jFSNPOoLxL7uoai+i04eP9MwHTx9iyu/3kyivsGk2D4CjHm7ibK/vqf3nb6xiWk+tRI5vL/Wqb3MoZu+28DYPk4/VLyIIMK+BUyXvtZfHj+LPqi8Xgctv0T2kL7V/tg+yn0LveqMx74/n4y+f5EeP2BrK713ZjG/gkeGvj2L2T4BMGS9CdfTvtWuHDs2LGE+h/7aOalygb62c947yOTKPJMBmLskySM9+a7uOwE8YT7TmXu7wiCCvrBqPzwvjNQ+ny4SvNhMDL8+uaM8eWthPhfiorxcMYS+cMrHPFlKzzxTL82806jmPMTvyzxLhC2+WZLIvKQroD6FLLA8RBfVPC9RlbysS008i2+0PGTlLL6hQ5O84EuZPrnFmDzYVNk8LWtEvGpkkTp1Hp0813UsviAORLwCeZQ+fIaBPDEj3Dy4EMq7NkLTu5bthTxTMiy+XUrOu6eNkT7VwFQ855jdPJfbH7pgCCq8/Z1dPHSDYz6sRFa6AkSbvvI1kzyuw908CYbhu8hoMbxhpZc8NKBjPrWe6LtjgZy+7xC8POhm3zw0eVi8Aa15vMCIwDzt62M+i3ddvHvGn75rAOU8RojiPJHcobwI68G8RIjpPOFnZD5tvaW8rCGlvuIJBz3rNuc8AJXavLSiFL3LWQk9gBZlPgeH4Lx5rqy+g60bPZqL7TyN5Au924havaENHj34lym+ckMQvZTXaT5afBA9Far1PLkc+7zvDJq9QPESPenyZj7XtwO9dTzBvhVrJT3tAv08y6IivUylwr3L8ic9x5InvvdrKr1sSj0+5IoaPaFCAz1KRxu9uhv3vfIqHT2PJ2k+riklvfyg2b7z0S89k9QHPcH7R71h1xS+ZokyPQYZJb4H5FO9ZLEGPjNUJT2ZDba+hB1JvaKe0D5NMwg9+58jvnS8J70cNcw9hzj2PNYFEz1fkR+9VopSvgsa/DwKdSK+PGkwvWenmD3LG+I8Qs20vg9OKr1W57Q+g0CoPCU4Ib4/XA29WOhDPfh0jjxBTBw9M3EJvR7Wgr52tZQ8Y1pvPkJgHr1jAQ+/XQG7PDSAID1DI0y9P4WOvuRswTys6h6+5/BiveJi7Lqp/6c8tu6yvrkWY70XuYs+B3tdPEU0C7+ru0y95C4MP2wyLTtRKrK+3d8fvWVWdT7+c427v+IKv1o/DL2OCwU/EoB4vIihsb6XWMO8C6FdPpEXtbxqrQq/qeKfvGFiAD887Aa9eFCxvltwG7z3mE8+CEsjvamvGr7vA7K7WFG+vQGrL72YNLG+xOruu8zJSj5WBUy97YYKv95DWrvNEPo+eVl4vbAgsb6S89I73FlHPlBYir2eixq+n0QpPA+KxL3bhpC9CkCxvl/SCTzKwUw+8bSevdbRGr4QWEs8H3G4vUvmpL34ZrG+U9UtPC94Uz5+F7O9FCgbvueAcTwwlam9S0y5vVmWsb7LXlY8+6NbPkiBx71RkBu+4FOOPFCfl71Bus29S+0vPaYygjxN7bi+4vfLvf4MHL7dCg489x2Cvdc10r2s/bG+inLyO9FvbT4Yc+C9Nkkcvhc0RTwyfG+9drPmvfohsr5wCzI8ILNzPp/z9L0Knxy+qAOAPNroUb1sN/u9eM0rPV48bzwUjK2+nH/5vSmKcj5mKgA8NoMgvwDM773gfio9tZSau1Pqqb6IF+69pRwdvkgJOrwimia9XGD0vcRfsr5LXUe8wl1+PrtSAb59xBy+Oe/ru6T7RL1hdQS+upEsPdK5Bbyboq++fpgDvu6KHL7kIXa8DddYvf25Br7u6i09Y72DvK1ds75g2wW+aBYcvgsjvbzRhYC9i/oIvpExMD0wa8e87qq5vgQZCL7IZBu+i2oBvXwtn72hNAu+OVyxvobIB73dvVE+zUwSvlZyGr4HAu689fbIvZFjFb6TFjc9yRX+vO62zL43eRS+jZEZvgDMH73gy++9fIsXvsVjsL6EYym93fImPreZHr4M/Qm/aAgcvRKB4j64oym+Ns+vvp
eV77wWQg0+AqwwvtRlF76o+9i8m78nviqzM770Zq++otLzvAeR9j1Itzq+dpMWvvQY4Lym4jm+O7o9vhP8rr7R1v28ZLfRPRO6RL6SUwm/0w/tvDQyxT6Ftk++3ouuvoX1rbyh+ao94LFWvhUlCb/zR6C8+SC9PpqqYb5uP66+CYVHvD2ZkD3momi+moMUvkdiMLwWUWe+SptrvnIUrr6+Z3q8aMmBPd6Rcr5HIhS+rqNlvBm2b75QiHW+GX5PPWosmbxd+ge/uX50vkqoE74MM/C8GEx6vrtyd74KQVI9nB8MvdbUC7+bZXa+hbsSvpneOL1AbYe+4FR5vqrsrL6uiU69b/BePM8fgL4qOAi/UWxNvSh2lD6ykoW+cjCsvlWrNb00UZS8TgSJvkbgB78GJze940CFPq1zjr63iau+9tQhvVErPb304ZG+05EHv4KdJb0JY28+L06Xvv5dOb/ddhK9inADP1i4nr5WL2u/qs7QvIFtSz+jIKi+hCU5vx86Hbzo8/w+ioivvjgpB78INZU5/R1LPpbwtL7/HDm/B1KLO0x1+z4lWLy+by0Hv+2XZjyUlUw+XcDBvpiOqr68B5Q88hW1vZ4pxb55Swe/GYuFPCTyVj4Jk8q+A045v0jvpzze9wE/jvzRvtFtB79HHfs8N9piPlln177AL6u+k7QPPV4te73T09q+LKsHv4yuCj2ECHg+EkHgvs6wq75Fhh49Qi0ivSCw477Y7ge/7UcbPeSzhz4UIOm+M0Csvk/+MD1nCn28AZLsvo9bEb5qui89oJ+Wvh4G7r5t4ay+4KAXPbHlPzxEe/G+9YQSvoGWGD3Xwom+lIwFbnVtcHmUjAVkdHlwZZSTlIwCZjSUSwBLAYeUUpQoSwOMATyUTk5OSv////9K/////0sAdJRiS8hLBIaUjAFDlHSUUpQuAAAAAA==", "action_prob": [0.7213780879974365, 0.4139295220375061, 0.821854829788208, 0.47617658972740173, 0.7916843295097351, 0.46624302864074707, 0.8372516632080078, 0.39363035559654236, 0.1346515417098999, 0.9414415955543518, 0.8930035829544067, 0.761457622051239, 0.49698659777641296, 0.8107317090034485, 0.590855062007904, 0.3267541229724884, 0.839712917804718, 0.628971517086029, 0.6975117921829224, 0.5732042193412781, 0.7396360635757446, 0.516476035118103, 0.775055468082428, 0.5403838753700256, 0.7092104554176331, 0.4202454090118408, 0.8226398229598999, 0.6275216937065125, 0.6324013471603394, 0.6606491208076477, 0.6001468300819397, 0.6882561445236206, 0.5700432062149048, 0.7115375399589539, 0.5418425798416138, 0.7315053343772888, 0.5151776075363159, 0.7489808201789856, 0.48962071537971497, 0.7646146416664124, 0.5352729558944702, 0.7173808813095093, 0.45746511220932007, 0.7826154828071594, 0.4412807524204254, 0.791641116142273, 0.5755326151847839, 0.30852392315864563, 0.8614036440849304, 0.7151077389717102, 0.5467969179153442, 0.7358790636062622, 0.480135053396225, 0.2292848378419876, 0.8930206298828125, 0.7712062001228333, 0.49097132682800293, 0.23212605714797974, 0.10669825226068497, 0.9389421939849854, 0.8982293605804443, 0.7953478097915649, 0.4254602789878845, 0.8066340088844299, 0.597457230091095, 0.6792062520980835, 0.604806125164032, 0.3208724558353424, 0.8617354035377502, 0.7043945789337158, 0.5751790404319763, 0.2738915681838989, 0.8897159099578857, 0.22978118062019348, 0.9097772836685181, 0.6228432655334473, 0.7435401082038879, 0.61360764503479, 0.2477831095457077, 0.9095168709754944, 0.7801698446273804, 0.4525766372680664, 0.8327217102050781, 0.46640169620513916, 0.8284577131271362, 0.4737626910209656, 0.8268309235572815, 0.4748761057853699, 0.8278769254684448, 0.5301427841186523, 0.8026209473609924, 0.5149714350700378, 0.8121863007545471, 0.4923042953014374, 0.8245675563812256, 0.46186426281929016, 0.8391351103782654, 0.42364269495010376, 0.8551170229911804, 0.6217204332351685, 0.7291638255119324, 0.3495219051837921, 0.8799537420272827, 0.6966665983200073, 0.6533008217811584, 0.27202799916267395, 0.9022729992866516, 0.7715863585472107, 0.4595305919647217, 0.8235061764717102, 0.5077021718025208, 0.8155178427696228, 0.5410616993904114, 0.7819459438323975, 0.43156698346138, 0.15438759326934814, 0.9313734769821167, 0.8684849143028259, 0.6835302114486694, 0.34543290734291077, 0.8698524832725525, 0.36346012353897095, 0.8670570850372314, 0.3677535951137543, 0.8692216873168945, 0.6417256593704224, 0.718813419342041, 0.3588016629219055, 0.8764611482620239, 
0.6657156944274902, 0.6944708824157715, 0.6801754236221313, 0.6809449791908264, 0.6964210867881775, 0.6643675565719604, 0.7144343852996826, 0.3558897376060486, 0.8658710718154907, 0.6425511240959167, 0.7324427366256714, 0.6258729696273804, 0.7475165128707886, 0.39534711837768555, 0.1475498229265213, 0.9318910241127014, 0.8624523282051086, 0.637840747833252, 0.7293006181716919, 0.36049243807792664, 0.869653582572937, 0.3399391770362854, 0.8777683973312378, 0.31278350949287415, 0.8874017000198364, 0.7198340892791748, 0.6339855790138245, 0.26065030694007874, 0.9031516313552856, 0.7724509835243225, 0.44871971011161804, 0.8334833979606628, 0.536676824092865, 0.798795223236084, 0.5082334280014038, 0.8131476044654846, 0.5221230387687683, 0.7985019683837891, 0.5302758812904358, 0.7966148853302002, 0.4706451892852783, 0.825904905796051, 0.45574015378952026, 0.16687451303005219, 0.9280266165733337, 0.1487257480621338, 0.9340620040893555, 0.8719624280929565, 0.6773000359535217, 0.6666180491447449, 0.7052404880523682, 0.6365298628807068, 0.7281424403190613, 0.39164525270462036, 0.14720593392848969, 0.9347227811813354, 0.8628158569335938, 0.35975733399391174, 0.8720171451568604, 0.6704529523849487, 0.6713008284568787, 0.3087306618690491, 0.8904967904090881, 0.7295355796813965, 0.5921083092689514, 0.7583100199699402, 0.5478333234786987, 0.786098062992096, 0.5016189813613892, 0.7788039445877075, 0.5348271131515503], "advantages": [-1.2552050352096558, -1.0517654418945312, -0.4892127811908722, -1.290427327156067, -0.7073538899421692, -1.5826246738433838, -2.528796434402466, -2.309816837310791, -3.239999294281006, -3.818681478500366, -4.268354892730713, -4.434645175933838, -4.364510536193848, -5.139334678649902, -5.211651802062988, -4.85575532913208, -3.6680212020874023, -4.993865489959717, -5.984230995178223, -5.776572227478027, -6.801794052124023, -6.704524517059326, -7.753292083740234, -7.781363010406494, -7.046987056732178, -8.465328216552734, -9.592223167419434, -9.8331937789917, -9.31308364868164, -10.794843673706055, -10.369719505310059, -11.892854690551758, -11.556217193603516, -13.119613647460938, -12.866257667541504, -14.468029975891113, -14.29408073425293, -15.931794166564941, -15.834827423095703, -17.505586624145508, -17.48463249206543, -16.117403030395508, -18.6453800201416, -20.416658401489258, -20.494916915893555, -22.29063606262207, -22.42340850830078, -21.14980125427246, -18.51313018798828, -21.95074462890625, -24.68416976928711, -23.370380401611328, -26.152067184448242, -28.225223541259766, -29.922889709472656, -30.89512825012207, -30.91232681274414, -32.97352600097656, -34.50631332397461, -35.56045150756836, -37.430458068847656, -38.79111099243164, -38.971351623535156, -40.95563507080078, -41.212249755859375, -39.90666961669922, -43.00902557373047, -41.7252082824707, -38.9774055480957, -43.00685501098633, -46.236942291259766, -44.87086486816406, -41.99107360839844, -46.076778411865234, -43.07352066040039, 14.24731159210205, 14.364058494567871, 13.964797019958496, 14.079599380493164, 15.526520729064941, 13.997466087341309, 13.594863891601562, 14.129584312438965, 13.175332069396973, 13.689531326293945, 12.755794525146484, 13.250836372375488, 12.33254337310791, 12.810294151306152, 11.90279483795166, 12.02852725982666, 11.621094703674316, 11.759895324707031, 11.347976684570312, 11.51004409790039, 11.089213371276855, 11.288022994995117, 10.853058815002441, 11.106826782226562, 10.651590347290039, 11.22917652130127, 10.262110710144043, 10.641066551208496, 10.127187728881836, 10.698798179626465, 
9.804163932800293, 10.397441864013672, 9.800763130187988, 10.372212409973145, 12.024816513061523, 9.812178611755371, 9.186883926391602, 9.624637603759766, 11.11975383758545, 9.173702239990234, 8.791208267211914, 10.102252960205078, 9.235032081604004, 9.602256774902344, 11.004692077636719, 13.010529518127441, 10.338736534118652, 12.168316841125488, 9.745804786682129, 11.381994247436523, 9.197344779968262, 8.293270111083984, 9.029199600219727, 10.332113265991211, 8.525392532348633, 7.811553955078125, 8.328146934509277, 7.667278289794922, 8.115945816040039, 7.490826606750488, 7.877608776092529, 7.272388458251953, 7.672658443450928, 7.33319091796875, 7.645315647125244, 7.077125549316406, 7.340902805328369, 6.7694220542907715, 7.184791088104248, 8.917820930480957, 7.601263523101807, 7.1260223388671875, 7.2899675369262695, 6.844161510467529, 7.474212646484375, 6.907777309417725, 7.642219543457031, 7.01216459274292, 7.878201007843018, 7.169477462768555, 7.030662536621094, 6.97844123840332, 8.073103904724121, 7.214142322540283, 6.834805011749268, 6.7157368659973145, 6.278583526611328, 6.5770697593688965, 6.0395188331604, 6.468071937561035, 5.8187785148620605, 5.413116455078125, 5.285552024841309, 4.815357208251953, 4.74419641494751, 5.347853183746338, 4.5259599685668945, 5.2427897453308105, 6.725831031799316, 5.618044376373291, 7.248331546783447, 6.096678733825684, 4.939546585083008, 3.666160821914673, 4.604979515075684, 3.271275281906128, 4.3152594566345215, 2.9351181983947754, 1.6809160709381104, 0.9655347466468811, 0.7439358830451965, 1.3089163303375244, 0.07055867463350296, 0.632910430431366, 1.5766246318817139, 0.25799182057380676, -1.0514847040176392, -0.48950424790382385, 0.3930617570877075, -0.9520876407623291, -0.087607242166996, -1.4689116477966309, -0.6378072500228882, 0.25155287981033325, -0.8568115234375]}
+{"type": "SampleBatch", "eps_id": [1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 1552982767, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513], "obs": 
"BCJNGGhAFg0AAAAAAABQFg0AgIAFlQsNAAAAAAAAjCByYXkuY2xvdWRwaWNrbGUuY2xvdWRwaWNrbGVfZmFzdJSMEV9udW1weV9mcm9tYnVmZmVylJOUKJaADAAAAAAAAER78b71hBK+gZYYPdfCib5b8vK+ueJSPc+LAj2ovQy/ZGvyvtaGE76kBKs8dSZ9vg/l876y0K2+m4OCPMzlVD3+Xve+KhAUvq4HizzXSnG+Cdr4vlgOrr6w2Eg8P2N/PTNV/L5FfRS+B0ddPMzfZ75V0f2+nT6uvuQTEzz9U5A9vKYAvy0bCb+SKyo8/GC7PrdkA7/gYq6+3AuRPEXYnD0lIwW/ZjsVvguYnTzcgle+KuIFv7YkST1pOXY8f3j+vsyhBb+RuxW+3bmmO1tsTL50YQa/TM6uvp2Ujzqp2ME99SAIv6vSFb4X2kM7F25KvrvgCL+TwEc9wQh9utKV+r7PoAi/VdoVvm8wMLxbxkm+n2AJv2luSD3UwXC8zHf8vnwgCb9RexW+FyvJvPoBUr7S3wm/+bFKPQHF6ry7YgG/9Z4Jv660FL7HyR69Ajljvk1dCr8B9q2+TPcwvQfWbj2kGgy/x4ITvnUwLL1KoX2+dNcMv4VWrb7MekC9Ts4APTOTDr+dcAi/UOc9vXotnj7GTRG/x6esvluYJL0dVwM7xQcTvysgCL9UbiS9jECQPrzAFb+x7Dm/w1kNvWC+Dz+reBm/ldoHv4+0vrzCK4Q+PTAcv9C3Ob8eaZS8CxwLPx3nH7/uswe/o4Xtu0nxej7qnSK/XHOrvm7WGbtSY0y91FQkv98PD77kPVu76MevvvILJb91KmM9aU8nvDwWI79BwyS/t90OvtsHvLy5+bG+H3olvx8qq76b+/S8rfp+vU4wJ783GQ6+mC7/vKV6ur4x5ie/1beqvnttHb058qa9Opspv98WDb4CGyS9dqvFvtJPKr9hGG09kLtDvefsML/0Ayq/5tELvkpZfL0y5NO+7LYqv79Wqb4uII+9EpkQvm5oLL/3WAa/3eiUvYG4Az5KGC+/jAM4vwukj73Mh8s+cMYyv6jTBb97t369aymrPaJzNb9NiTe/yd53vatGtj5XHzm/+mAFv8K0Wr1WzTc9Pco7v8EfN7+xB1e98PajPtVzP7/r/QS/s8s8vfYMPDzAHEK/McQ2v//aO70QG5Q+hMRFv/KKaL+WKCS9MzARPyJrSr8idDa/hmXrvN4yhj5MEU6/iGYEv/hzwLzbySG9MLdQv+66pL6u7Ma8CtOtvuVcUr/KOQS/YIz+vPuHX73kAVW/OQ82v6u+A709n2k+CaZYv5T+A78tHOK8r5aYvdlJW7+446O+MVHuvKtkwL5o7Vy/nCD/vQbxFb1Sby2/sJBdvxNqo77OcE29uQfLvgczX7+mdwO/6+xtvbHv9b0k1GG/xKGivk/Dd71mbNy+enRjv/8IA7/tg42930YhvmETZr9xtzS/ZveTvd6g5j2msGm/OIUCv5Vaj70xz06+6Uxsv8kyNL9QoJe9J/KKPYfnb78c/wG/6NiUvYgufb4cgXK/7qkzv335nr37Y7E8/Rh2vzBPZb9uFp698uaTPg2ver9rGTO/ZUGSvZoy3rwKRH6/TsVkv85dk71RA3g+rGuBv4aTMr8ncom9hwmUvdQ0g79fZQC/G2iMvVJOxb6GfYS/7xQyv+4wnL1opOu9aUWGv7W7Y79r56C9rUgcPlKrEj3bJSU8reQjvDRjz7y2fhM9pUVSPlMwLLw1bKS+FVEkPdHzKTyrtYq8BDQCvZ8qJT3bq1I+8+qPvOvWqL4rBTY9EggyPEjyxbwryi69DOk2PSlHUz4h8My8GI6vvgXQRz1biz08zY4CvVNZbr2jwkg903k7viVTB71upmU+IsM5PTa9TDzT5+m8xBOhvTPJOj3wkzq+r8r2vEXOUT4V3Cs9OHlaPAs51bwn88a9u/MsPdXUVT6JI+W8mbzLvv8OPj3NGmc80yoTvb3b6b3PNj89ucFWPoiFHL1nAta+B2VQPQdveDxbwz693+QMvgajUT13+1c+3ghKvSWi475X6mI9/miHPLt0br2x4yu+/URkPVv6Nb4GNXy9xOPYPRW2VT3Jab6+E4hzvT3QwT7BPjc9+TU0vnqFVL00sYo9CtQoPaybvb5F+U69aOuvPrF9Cj3wjxC/mtMyvUezHj9Ndrg8xFlCv+AKAL3eBmY/kE/wO2xLEL9OvFm8h6AYP1oVgbvGl7y+xvuyur9FmT67PTm8Z1wxvl9xlztAs8w7I/9xvN+dvL5viZs7/8uZPgxbtbyEfzG+yzIwPE3FFjxfwdG8Yy2xPL02MzwZZY++OTbOvNTQMb7W4a47um+DPI6p6rxoSK88p2W5OzjGjL4bKOe8CvoxvmyXJjko2J88BNEBvVjkvL7C8g86nt6fPggKIL0yZhC/aqDeO+znGj83P0696PG8vuXLmjzPDqE+ZnpsvcR9EL/BVc48QfccP5Bbjb3PQ72+hWUZPZExqD61f5y9b0ozvrtOND2MCkQ9paujvRS5njx2Ojg9EiZsvnvgor2UlzS+IlYlPd92mz2+Gaq958aUPBeOKz3MqFC+T1upvXDLNb6/3Bo9aorQPeOgsL38eos8NDQjPST7Nr5a7q+9Ce42vsCQFD3vUQE+jT+3vZ+fv7466R49+UHcPgCUxr2vBji++yZCPf+VGT5u8M29SNVwPGxwTj1cbgK+TFbNvSN0Ob4xAUQ9yhw5PlfB1L1p9sC+TNBSPSjw+T43MeS9qeY6vsPNej0wOVk+FavrvUpIPzw/F4Y9NFF4vakw6709vVI+jpuDPar1qb6xwuK9MPkgPJEFbD0VhaG8q1vivR75UD4TaGo901yWvsj/2b0MHwY8OVlSPaAZhzzyqdm98CJAvhSzUz2YXKY+bFnhvf4S3DtBUW49A55IPQAT4b3Vx00+alRyPXYwZr7O19i93FXKPiPqXz3TGf+++qfIvT0fTD41GTc95WBBvsR9wL01n8k+0KAnPZ8n776NXLC9tJsWPwldAT2lIT+/o0OYvS0ZyT4ZZ4g8RGjjviUtiL2XJ0o+kIf+O9THFb4VF4C9BycPO4Ornjv6bBg+LQCAvWX4ST54HAA8nbYRvgDYb71u1Mg+RPeiOytx3b4Dtk+9cshJPr31cLtekw2+f5E/vQUQAjugFtO7CfEcPuBnP713rUW+pUpdu8VH4z5QOE+9P0QLO+dFtDvxwxk+wAtPvVrmST5qVws80ygQvtfkPr1uUvw6vWu6O2pIHj54vD69OLFJPmfcDzy/lAu+0JkuvQGvyD7ZY8Y7Ijjavs99Dr2feUk+fNwhu3rIBr4zv/y8dqjIPhsxp7vFpNm+S4m8vHRMFj9J4168yCg4v7qwOLzmysg+TU7lvE+s3L57u2C7hzJKPuz1Fb27zha+NVIIOm5CyT52BiK93A3nvn5TCTwnT0s+cP5GvdxuL76VYko8hfyHO04HVb0WXtg9tb5LPNHGTD60X0y9xNlPvgGjhjxSmbc7fABdvYCklj0Djoc8S0pOPu
f5Vr1JTnG+qI+oPM9W6TvYR2q96PIjPVW6qTyeoD++bQBnvX7VoD5GEYs8VkjDvqtETb1ZqBY/dCcZPGQUPr7MDh29dZmPPkGouDsqNiQ89xQGvXIVxbzIOb87sAs9voANCL0/KIQ+FHkMOw2nMzyu0OW8G7c3vV3YGjvwblM+7CntvAtIsb5svdQ7kP5APGnyEr0csYC9r3XcO50yO743GBi9o41fPmxOSTtAOMG+2DUGvUmE/z4Hq5K7YnASv8GnurwM/0c/UmOCvK3RwL51oeq7e4P2Pg8XwLwAuzm+PdAhO48EPz6Uzt28PpxgPHooyzshyNe9k4/bvG7bOb6iG4Y7FNBBPklM+bxQM1489hIBPKEj0b10E/e8NAg6vj45vzsUrEU+qWsKvVX9Wjzt3R481knIvVtTCb1LQjq+OaT9O/WtSj7wORi9LuhWPJ6tPzzHCL293CYXvRgQVT7GbiE88TXDvlUbBr3D21E8zv0RO2kZr722DgW9ZeNUPiq2BzqpRMG+hQ3ovLY2UTxba+a771Ktve/15bwGoDq+B/EOvNvDUj4M6QG9IedUPE7+lruffre9iNgAvTZuOr4vttG7uXdOPqHCD73eelc8CiUbu46Zvr3Rrg69dE06vnWQirtCpEs+S5YdvR8GWTz8vIO5bdvCvYCAHL25PDq+BC0Nu5MySj6jZiu9MJZZPCpG6zrdaMS9IVAqvWU7Or66CgG59RRKPik2Ob3bMVk8hJl6O5ZUw70mIDi9vlJVPiot+zo8Eca+Sg8nvTzZVzxpu767Vp6/vQH7Jb1eYVU+zAz8uxSyxr756BS9lwJbPLAwfbzbWsi9lIwFbnVtcHmUjAVkdHlwZZSTlIwCZjSUSwBLAYeUUpQoSwOMATyUTk5OSv////9K/////0sAdJRiS8hLBIaUjAFDlHSUUpQuAAAAAA==", "actions": [1, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0], "rewards": [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0], "prev_actions": [1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0], "prev_rewards": [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0], "dones": [false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, true, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false], "new_obs": 
"BCJNGGhAFg0AAAAAAABQFg0AgIAFlQsNAAAAAAAAjCByYXkuY2xvdWRwaWNrbGUuY2xvdWRwaWNrbGVfZmFzdJSMEV9udW1weV9mcm9tYnVmZmVylJOUKJaADAAAAAAAAFvy8r654lI9z4sCPai9DL9ka/K+1oYTvqQEqzx1Jn2+D+XzvrLQrb6bg4I8zOVUPf5e974qEBS+rgeLPNdKcb4J2vi+WA6uvrDYSDw/Y389M1X8vkV9FL4HR108zN9nvlXR/b6dPq6+5BMTPP1TkD28pgC/LRsJv5IrKjz8YLs+t2QDv+Birr7cC5E8RdicPSUjBb9mOxW+C5idPNyCV74q4gW/tiRJPWk5djx/eP6+zKEFv5G7Fb7duaY7W2xMvnRhBr9Mzq6+nZSPOqnYwT31IAi/q9IVvhfaQzsXbkq+u+AIv5PARz3BCH260pX6vs+gCL9V2hW+bzAwvFvGSb6fYAm/aW5IPdTBcLzMd/y+fCAJv1F7Fb4XK8m8+gFSvtLfCb/5sUo9AcXqvLtiAb/1ngm/rrQUvsfJHr0COWO+TV0KvwH2rb5M9zC9B9ZuPaQaDL/HghO+dTAsvUqhfb501wy/hVatvsx6QL1OzgA9M5MOv51wCL9Q5z29ei2ePsZNEb/Hp6y+W5gkvR1XAzvFBxO/KyAIv1RuJL2MQJA+vMAVv7HsOb/DWQ29YL4PP6t4Gb+V2ge/j7S+vMIrhD49MBy/0Lc5vx5plLwLHAs/Hecfv+6zB7+jhe27SfF6PuqdIr9cc6u+btYZu1JjTL3UVCS/3w8PvuQ9W7vox6++8gslv3UqYz1pTye8PBYjv0HDJL+33Q6+2we8vLn5sb4feiW/Hyqrvpv79Lyt+n69TjAnvzcZDr6YLv+8pXq6vjHmJ7/Vt6q+e20dvTnypr06mym/3xYNvgIbJL12q8W+0k8qv2EYbT2Qu0O95+wwv/QDKr/m0Qu+Sll8vTLk077stiq/v1apvi4gj70SmRC+bmgsv/dYBr/d6JS9gbgDPkoYL7+MAzi/C6SPvcyHyz5wxjK/qNMFv3u3fr1rKas9onM1v02JN7/J3ne9q0a2PlcfOb/6YAW/wrRavVbNNz09yju/wR83v7EHV73w9qM+1XM/v+v9BL+zyzy99gw8PMAcQr8xxDa//9o7vRAblD6ExEW/8opov5YoJL0zMBE/ImtKvyJ0Nr+GZeu83jKGPkwRTr+IZgS/+HPAvNvJIb0wt1C/7rqkvq7sxrwK062+5VxSv8o5BL9gjP68+4dfveQBVb85Dza/q74DvT2faT4Jpli/lP4Dvy0c4ryvlpi92Ulbv7jjo74xUe68q2TAvmjtXL+cIP+9BvEVvVJvLb+wkF2/E2qjvs5wTb25B8u+BzNfv6Z3A7/r7G29se/1vSTUYb/EoaK+T8N3vWZs3L56dGO//wgDv+2Djb3fRiG+YRNmv3G3NL9m95O93qDmPaawab84hQK/lVqPvTHPTr7pTGy/yTI0v1Cgl70n8oo9h+dvvxz/Ab/o2JS9iC59vhyBcr/uqTO/ffmevftjsTz9GHa/ME9lv24Wnr3y5pM+Da96v2sZM79lQZK9mjLevApEfr9OxWS/zl2TvVEDeD6sa4G/hpMyvydyib2HCZS91DSDv19lAL8baIy9Uk7FvoZ9hL/vFDK/7jCcvWik671pRYa/tbtjv2vnoL2tSBw+aYyIv5GEMb8Sp5q9J7UnvrZ+Ez2lRVI+UzAsvDVspL4VUSQ90fMpPKu1irwENAK9nyolPdurUj7z6o+869aovisFNj0SCDI8SPLFvCvKLr0M6TY9KUdTPiHwzLwYjq++BdBHPVuLPTzNjgK9U1luvaPCSD3TeTu+JVMHvW6mZT4iwzk9Nr1MPNPn6bzEE6G9M8k6PfCTOr6vyva8Rc5RPhXcKz04eVo8CznVvCfzxr278yw91dRVPokj5byZvMu+/w4+Pc0aZzzTKhO9vdvpvc82Pz25wVY+iIUcvWcC1r4HZVA9B294PFvDPr3f5Ay+BqNRPXf7Vz7eCEq9JaLjvlfqYj3+aIc8u3RuvbHjK779RGQ9W/o1vgY1fL3E49g9FbZVPclpvr4TiHO9PdDBPsE+Nz35NTS+eoVUvTSxij0K1Cg9rJu9vkX5Tr1o668+sX0KPfCPEL+a0zK9R7MeP012uDzEWUK/4AoAvd4GZj+QT/A7bEsQv068WbyHoBg/WhWBu8aXvL7G+7K6v0WZPrs9ObxnXDG+X3GXO0CzzDsj/3G83528vm+Jmzv/y5k+DFu1vIR/Mb7LMjA8TcUWPF/B0bxjLbE8vTYzPBllj745Ns681NAxvtbhrju6b4M8jqnqvGhIrzynZbk7OMaMvhso57wK+jG+bJcmOSjYnzwE0QG9WOS8vsLyDzqe3p8+CAogvTJmEL9qoN477OcaPzc/Tr3o8by+5cuaPM8OoT5memy9xH0Qv8FVzjxB9xw/kFuNvc9Dvb6FZRk9kTGoPrV/nL1vSjO+u040PYwKRD2lq6O9FLmePHY6OD0SJmy+e+CivZSXNL4iViU933abPb4Zqr3nxpQ8F44rPcyoUL5PW6m9cMs1vr/cGj1qitA946Cwvfx6izw0NCM9JPs2vlrur70J7ja+wJAUPe9RAT6NP7e9n5+/vjrpHj35Qdw+AJTGva8GOL77JkI9/5UZPm7wzb1I1XA8bHBOPVxuAr5MVs29I3Q5vjEBRD3KHDk+V8HUvWn2wL5M0FI9KPD5Pjcx5L2p5jq+w816PTA5WT4Vq+u9Skg/PD8Xhj00UXi9qTDrvT29Uj6Om4M9qvWpvrHC4r0w+SA8kQVsPRWFobyrW+K9HvlQPhNoaj3TXJa+yP/ZvQwfBjw5WVI9oBmHPPKp2b3wIkC+FLNTPZhcpj5sWeG9/hLcO0FRbj0Dnkg9ABPhvdXHTT5qVHI9djBmvs7X2L3cVco+I+pfPdMZ/776p8i9PR9MPjUZNz3lYEG+xH3AvTWfyT7QoCc9nyfvvo1csL20mxY/CV0BPaUhP7+jQ5i9LRnJPhlniDxEaOO+JS2IvZcnSj6Qh/471McVvhUXgL0HJw87g6ueO/psGD4tAIC9ZfhJPngcADydthG+ANhvvW7UyD5E96I7K3HdvgO2T71yyEk+vfVwu16TDb5/kT+9BRACO6AW07sJ8Rw+4Gc/vXetRb6lSl27xUfjPlA4T70/RAs750W0O/HDGT7AC0+9WuZJPmpXCzzTKBC+1+Q+vW5S/Dq9a7o7akgePni8Pr04sUk+Z9wPPL+UC77QmS69Aa/IPtljxjsiONq+z30OvZ95ST583CG7esgGvjO//Lx2qMg+GzGnu8Wk2b5Liby8dEwWP0njXrzIKDi/urA4vObKyD5NTuW8T6zcvnu7YLuHMko+7PUVvbvOFr41Ugg6bkLJPnYGIr3cDee+flMJPCdPSz5w/ka93G4vvpViSjyF/Ic7TgdVvRZe2D21vks80cZMPrRfTL3E2U++AaOGPFKZtzt8AF29gKSWPQOOhzxLSk4+5/lWvUlOcb6oj6g8z1bpO9
hHar3o8iM9VbqpPJ6gP75tAGe9ftWgPkYRizxWSMO+q0RNvVmoFj90Jxk8ZBQ+vswOHb11mY8+Qai4Oyo2JDz3FAa9chXFvMg5vzuwCz2+gA0IvT8ohD4UeQw7DaczPK7Q5bwbtze9XdgaO/BuUz7sKe28C0ixvmy91DuQ/kA8afISvRyxgL2vddw7nTI7vjcYGL2jjV8+bE5JO0A4wb7YNQa9SYT/PgerkrticBK/wae6vAz/Rz9SY4K8rdHAvnWh6rt7g/Y+DxfAvAC7Ob490CE7jwQ/PpTO3bw+nGA8eijLOyHI172Tj9u8bts5vqIbhjsU0EE+SUz5vFAzXjz2EgE8oSPRvXQT97w0CDq+Pjm/OxSsRT6pawq9Vf1aPO3dHjzWSci9W1MJvUtCOr45pP079a1KPvA5GL0u6FY8nq0/PMcIvb3cJhe9GBBVPsZuITzxNcO+VRsGvcPbUTzO/RE7aRmvvbYOBb1l41Q+KrYHOqlEwb6FDei8tjZRPFtr5rvvUq297/XlvAagOr4H8Q6828NSPgzpAb0h51Q8Tv6Wu59+t72I2AC9Nm46vi+20bu5d04+ocIPvd56VzwKJRu7jpm+vdGuDr10TTq+dZCKu0KkSz5Llh29HwZZPPy8g7lt28K9gIAcvbk8Or4ELQ27kzJKPqNmK70wllk8KkbrOt1oxL0hUCq9ZTs6vroKAbn1FEo+KTY5vdsxWTyEmXo7llTDvSYgOL2+UlU+Ki37OjwRxr5KDye9PNlXPGm7vrtWnr+9AfslvV5hVT7MDPy7FLLGvvnoFL2XAls8sDB9vNtayL2k0BO98uI5vp2fjrwff0I+lIwFbnVtcHmUjAVkdHlwZZSTlIwCZjSUSwBLAYeUUpQoSwOMATyUTk5OSv////9K/////0sAdJRiS8hLBIaUjAFDlHSUUpQuAAAAAA==", "action_prob": [0.24131493270397186, 0.895858645439148, 0.7556158900260925, 0.5627695322036743, 0.7457023859024048, 0.5738638639450073, 0.7374980449676514, 0.4179460108280182, 0.8396899104118347, 0.6064826846122742, 0.2953885793685913, 0.8750197887420654, 0.7103742361068726, 0.6078583002090454, 0.29195210337638855, 0.8786695599555969, 0.27571606636047363, 0.8873761296272278, 0.25129878520965576, 0.8985724449157715, 0.7796880006790161, 0.4905412793159485, 0.8019941449165344, 0.5516629815101624, 0.746748685836792, 0.5776807069778442, 0.2728274464607239, 0.8870747089385986, 0.2769257128238678, 0.8864772319793701, 0.7282561659812927, 0.4269391894340515, 0.18631026148796082, 0.9157050251960754, 0.8317717909812927, 0.3750867247581482, 0.8453786373138428, 0.3430943787097931, 0.13919605314731598, 0.935564398765564, 0.8849738836288452, 0.7536598443984985, 0.49243804812431335, 0.7689984440803528, 0.5307973623275757, 0.7431386709213257, 0.5617672801017761, 0.7192195653915405, 0.5866039395332336, 0.302573025226593, 0.8675600290298462, 0.6909656524658203, 0.3980485200881958, 0.8212164640426636, 0.6238670945167542, 0.655376672744751, 0.3653869032859802, 0.16502514481544495, 0.9216912388801575, 0.8561554551124573, 0.29327332973480225, 0.8727355003356934, 0.7451703548431396, 0.4868038594722748, 0.7732892632484436, 0.43962687253952026, 0.7995861172676086, 0.6083648800849915, 0.6436713337898254, 0.642200767993927, 0.6060551404953003, 0.32846659421920776, 0.8494446873664856, 0.710797905921936, 0.5141618847846985, 0.46378377079963684, 0.8353226184844971, 0.4367847442626953, 0.8474537134170532, 0.4021541178226471, 0.8611968159675598, 0.6393392086029053, 0.7144891023635864, 0.6649620532989502, 0.6920929551124573, 0.3124138116836548, 0.889151394367218, 0.27347052097320557, 0.9007779359817505, 0.23188798129558563, 0.9121896028518677, 0.8089476823806763, 0.5409591794013977, 0.772519588470459, 0.5850352644920349, 0.25397947430610657, 0.09775908291339874, 0.950168251991272, 0.9098732471466064, 0.771769642829895, 0.5680984854698181, 0.7832019329071045, 0.4519575536251068, 0.8285578489303589, 0.4541161358356476, 0.8291754722595215, 0.5492079854011536, 0.20560041069984436, 0.923535943031311, 0.17992544174194336, 0.9317284822463989, 0.8505476117134094, 0.6034125089645386, 0.7351111769676208, 0.6447386741638184, 0.7040464878082275, 0.6819301247596741, 0.6704595685005188, 0.2843828797340393, 0.9047502279281616, 0.7631457448005676, 0.5617387890815735, 0.20330935716629028, 0.9253365397453308, 0.8364806771278381, 0.590406596660614, 0.7320359349250793, 0.642178475856781, 0.6925275325775146, 
0.31479865312576294, 0.8909397125244141, 0.7377358675003052, 0.4197559058666229, 0.832724392414093, 0.4501296281814575, 0.17669673264026642, 0.924437940120697, 0.8319843411445618, 0.5569643378257751, 0.7775733470916748, 0.44905632734298706, 0.8366489410400391, 0.5699752569198608, 0.23228183388710022, 0.9086724519729614, 0.7788719534873962, 0.55019611120224, 0.7831694483757019, 0.45721670985221863, 0.8337205648422241, 0.43971604108810425, 0.15513934195041656, 0.9351429343223572, 0.8691209554672241, 0.338882714509964, 0.8880844116210938, 0.7198560833930969, 0.6208048462867737, 0.7629486322402954, 0.5596981644630432, 0.8015178442001343, 0.5096896886825562, 0.20435230433940887, 0.9162795543670654, 0.7844460010528564, 0.5726713538169861, 0.7668732404708862, 0.40077295899391174, 0.8630709648132324, 0.6452500224113464, 0.29033249616622925, 0.10666076093912125, 0.9473624229431152, 0.900137186050415, 0.7340761423110962, 0.6292524337768555, 0.740675151348114, 0.6198363900184631, 0.7485766410827637, 0.608159601688385, 0.7577989101409912, 0.4061185419559479, 0.8529402017593384, 0.3982008397579193, 0.8579545617103577, 0.619327187538147, 0.7435746788978577, 0.6222450137138367, 0.7422121167182922, 0.6229832172393799, 0.7424502372741699, 0.6216181516647339, 0.7442542314529419, 0.6181512475013733, 0.7476000189781189, 0.3874887228012085, 0.8613584637641907, 0.37104445695877075, 0.8689646124839783, 0.6546879410743713], "advantages": [7.580617904663086, 8.718170166015625, 7.821845054626465, 6.710877418518066, 7.57504415512085, 6.450265884399414, 7.2691426277160645, 6.134671688079834, 4.556761264801025, 5.388761520385742, 6.112790584564209, 7.11945104598999, 6.005244255065918, 4.8401079177856445, 5.466280937194824, 6.50697660446167, 5.222599983215332, 6.333145618438721, 4.92026424407959, 6.132260322570801, 4.581396102905273, 3.320007801055908, 3.9024581909179688, 2.618546962738037, 1.3531938791275024, 1.6328734159469604, 0.42050260305404663, -1.108751893043518, -0.7711300253868103, -2.2222580909729004, -1.945410966873169, -1.7926582098007202, -1.2718160152435303, 0.16502735018730164, -1.7258199453353882, -3.1616945266723633, -2.4941656589508057, -4.030231952667236, -3.2669949531555176, -1.4177271127700806, -3.6995935440063477, -5.53087043762207, -6.868790626525879, -7.898977279663086, -8.268068313598633, -9.268308639526367, -9.646649360656738, -10.629170417785645, -11.01728343963623, -11.992298126220703, -12.800217628479004, -13.534740447998047, -13.965821266174316, -13.630119323730469, -15.145596504211426, -16.13077735900879, -16.555500030517578, -16.08648109436035, -14.561285018920898, -16.86104393005371, -18.68360710144043, -17.8935546875, -19.820213317871094, -21.19304847717285, -21.2159423828125, -22.66560935974121, -22.569801330566406, -24.100130081176758, -25.109458923339844, -25.794538497924805, -26.832387924194336, -27.45136260986328, -27.0532283782959, -28.74446678161621, -29.91606330871582, 20.753456115722656, 20.834613800048828, 20.694181442260742, 20.807846069335938, 20.65848159790039, 20.82159996032715, 20.657909393310547, 21.5726261138916, 20.457366943359375, 21.328603744506836, 20.27153205871582, 20.618633270263672, 20.35637664794922, 20.83203887939453, 20.537174224853516, 21.197174072265625, 20.864004135131836, 21.806888580322266, 23.83989715576172, 21.514575958251953, 23.422138214111328, 25.847166061401367, 27.924537658691406, 24.49230194091797, 21.513973236083984, 19.95418357849121, 21.1712589263916, 19.751596450805664, 19.605371475219727, 19.815196990966797, 19.73043441772461, 
19.922773361206055, 20.94406509399414, 22.653141021728516, 20.381067276000977, 21.9349365234375, 19.80426597595215, 18.8150634765625, 18.747034072875977, 18.672504425048828, 18.58218002319336, 18.494504928588867, 18.377090454101562, 18.2767391204834, 18.862350463867188, 17.799306869506836, 17.587078094482422, 17.441600799560547, 17.999296188354492, 16.841815948486328, 16.490354537963867, 16.861783981323242, 16.066755294799805, 16.38523292541504, 15.610090255737305, 15.543232917785645, 14.969192504882812, 15.117175102233887, 16.25647735595703, 14.667774200439453, 15.825532913208008, 18.732955932617188, 15.693540573120117, 14.018409729003906, 13.383036613464355, 13.4642972946167, 14.775519371032715, 13.10832405090332, 12.505882263183594, 12.744063377380371, 11.837752342224121, 11.855700492858887, 11.318857192993164, 11.307025909423828, 12.451415061950684, 10.928651809692383, 12.139447212219238, 15.426178932189941, 12.20697021484375, 10.523116111755371, 12.132685661315918, 10.362631797790527, 9.812178611755371, 10.042710304260254, 9.477104187011719, 9.794739723205566, 9.208287239074707, 9.648468017578125, 11.064179420471191, 8.870158195495605, 8.055410385131836, 8.38062572479248, 7.614809513092041, 8.114533424377441, 7.436225891113281, 7.744305610656738, 8.997642517089844, 10.93917179107666, 7.903730392456055, 6.137599468231201, 5.561269283294678, 5.646867275238037, 5.077147483825684, 5.13960599899292, 4.572854518890381, 4.613318920135498, 4.045536518096924, 4.490211009979248, 3.6949615478515625, 4.19376277923584, 3.3772928714752197, 3.421656608581543, 2.8677380084991455, 2.8865480422973633, 2.3476696014404297, 2.3416287899017334, 1.814570426940918, 1.7843564748764038, 1.2657872438430786, 1.2121046781539917, 0.698505699634552, 1.2680789232254028, 0.3474351763725281, 0.999905526638031, 0.0529327392578125]}
+{"type": "SampleBatch", "eps_id": [896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 896829513, 90188607, 90188607, 90188607, 90188607, 90188607, 90188607, 90188607, 90188607, 90188607, 90188607, 90188607, 90188607, 90188607, 90188607, 90188607, 90188607, 90188607, 90188607, 90188607, 90188607, 90188607, 90188607, 90188607, 90188607, 90188607, 90188607, 90188607, 90188607, 90188607, 90188607, 90188607, 90188607, 90188607, 90188607, 90188607, 90188607, 90188607, 90188607, 90188607, 90188607, 90188607, 90188607, 90188607, 90188607, 90188607, 90188607, 90188607, 90188607, 90188607, 90188607, 90188607, 90188607, 90188607, 90188607, 90188607, 90188607, 90188607, 90188607, 90188607, 90188607, 90188607, 90188607, 90188607, 90188607, 90188607, 90188607, 90188607, 90188607, 90188607, 90188607, 90188607, 90188607, 90188607, 90188607, 90188607, 90188607, 90188607, 90188607, 90188607, 90188607, 90188607, 90188607, 90188607, 90188607, 90188607, 90188607, 90188607, 90188607, 90188607, 90188607, 90188607, 90188607, 90188607, 90188607, 90188607, 90188607, 90188607, 90188607, 90188607, 90188607, 90188607, 90188607, 90188607, 90188607, 90188607, 90188607, 90188607, 90188607, 90188607, 90188607, 90188607, 90188607, 90188607, 90188607, 90188607, 90188607, 90188607, 90188607, 90188607, 90188607, 90188607, 90188607, 90188607, 90188607, 90188607], "obs": 
"BCJNGGhAFg0AAAAAAABQFg0AgIAFlQsNAAAAAAAAjCByYXkuY2xvdWRwaWNrbGUuY2xvdWRwaWNrbGVfZmFzdJSMEV9udW1weV9mcm9tYnVmZmVylJOUKJaADAAAAAAAAKTQE73y4jm+nZ+OvB9/Qj6ZryK9/rNiPBECX7yFkN29a40hvftuOb6tOoG8/X44PhhjML3QlGk8b2tHvDuH8L0dOC+9gpRWPnjnbbxk8NO+gw0evRXTbzzIxbq89OQAvonaHL3jHlc+S2XPvEPw2b7dpAu9JRN7PGmRCr2AcxC+fWMKvaqZN77FHxa9NCIQPp8TGb2gxoU86pcKvfotJ74ovRe9EZI2vsD3F72TzvI9NFgmvVQQjjxoQQ69tQw+voXsJL1ZhTW+oHUdvS54xD0RcjO9zF6+vseZFb30osA+oudRva5sNL7Fju28KAOUPbpWYL1V5b2+erfhvN8Ztj7cuH690pgzvsJxp7yE6149gYuGvcIDpDwOh568N4J6vpG5hb2+AzO+5JvGvLKJKz2t4oy9oxmpPFi/v7zeRoS+OgqMvTtRMr52E+q8IQHcPDIsk71w2Ly+Cq3lvDvrnj7AR6K9HX0xvmfSsrxuaBM8PGGpvax7vL4KWbG8s+SWPl51uL2wHBC/2w+BvA6bFD8zhM+94zW8vjXRh7uO1pA+wJLevUuOML5eTcY6Wd41vK6i5b0DMby+CDSpOhtqkD7YsPS9zpgwvr4m4zvwYCe8MsH7vQnYuDzJdNw71vWZvpjU+r0DzDC+lhg7OhKMwbt/8wC+X068vuUgHDra8ZI+wXsIvkEbEL/2ms87gHIUPw0DFL4yW7y+V+iSPPIQlD7Sixu+kSwxvuxJwjxRYBE79BYfvgIysjz3psI8vtaQvuikHr5+3jG+xE2UPGvYjDyZMyK+rve8vuUelzx3jaE+oMIpvnNnMr5J0co8GU7rPA1ULb4eSL2+DIbPPKaBqD5M5jS+FCMzvgy5Aj08YTY9e3s4vkF6oTzVXgY9/qBzviIUOL6Mfls+oMLlPLYjBb9SsDO++1+aPAyNkDz271++hk0zvsbWWj4mcVk8FXkBvxHtLr6SOdE+x91OO1RSS7+bjia+xplaPvqIULwYJAC/Xi8ivuTalzwcR7q84v1Yvi7OIb6PezS+F//cvLGQlj1BaiW+xqidPITz0LyNA2m+WgUlvjjgWz7DO/a8NTYHv5afIL6vIqQ8bWImvc39er6KNiC+o6syvrt2Or3sRA09VckjvojjvL5voze98PufPo5XK75ZWTG+fAoevYROxDuV4y6+d0i8vtmMHb33j5I+mWs2vsU4ML6nGQa9HPmVvNvxOb4DYsA8lZkHvR9spL67djm+NkIvvlPoIb3dDSC9Dvg8vv86u77NGyW9zah2Pkt1RL7NaQ+/N2ARvahEBT9m7k++gq26vsZ1zbz5IV4++mVXvmAxLb4366m8KSKrvbrcWr7yV7q+B5y3vL5dTz7iUGK+uo8svk9ulrzr/Ma9ZsRlvkcQ3DyVWaa8qonKvo83Zb73/iu+einnvBH9370tqGi+DLC5vsMU+bw0djI+nhVwvoMjK774hty8IegCvtmBc77oRLm+6XjxvBv7Hz4B63q+g3kOvxjg17xhAuE+cSiDvnDcuL5V34+8+ewNPu7ahr4Oqim+HFRyvORmI75GjYi+5J+4vv9Ok7xzfQM+jT6Mvm8vKb5Viny8dvgtviVlp7zjW4Y85cE2vWOcoLw6taS8lTlZPg9dOL3c7Ka+svOBvNLWkDxQElO9gRNEvT0cfrxMoFo+Of5WvUx0tr5iJji8MP6cPI0vdL0BOaW9xt4xvI5BM75vy3q98T1FPm87a7ytG6s86wNrvWgq871JY2S8Tocxvu+9dL2gCR8+NpmOvCs0vL7YBGi9R2zbPt7Syrx81C++R+lEvfjV8j3d9Oa8aSnFPKQyO705akG+ZQPjvOx1Lr7Hq0q9A1u2PU7t/rxzRNA8dWBDveQVYL75wvq8ioBiPrpNVb1Iswa/c4XWvKri2zwwNIC9EC2AvqIf0rxpWSu+PHWKvTPcszwaiu286QbrPAOPib0lIZW+w9bovBxiKb4vfZW9IvWnvFr4Ab13Bbi+K1SWveI+fj7daR+9TT8nvrIojL0Rboi9F8ssvcT9tr434469UppQPmkSSr0MOyW+H4uGvUeP4b1WSle9e+MNPf0Ni71hedi+3XNUvapII75hX5y9NO0bvuuDYb3vMxY9EZyivfCh777hgl693AQhvr7Htb08Ok6+jGRrvVe1s76DB769QQt+PbMShL02Yh6+KX27vV9RhL6NaIq9QIQqPQcTxr2KGhS/B7SIvUapG75bxd29aLiivv/tjr1s3bC+3snqvU51er0xFJ29UOsJvwpL7b0Y60U+XSWzvclmO79bYOW9PQLlPlQh0b3B52y/Qg7TvQHiMz/4CPe9SKE6v0JGtr0+QcI+q3IKvtBiCL/ru6a9U+t1PdpbFb64Uqy+XUakvfysg75xQBy+o8QPvhXPrr2i9BK/iCAfvsEfq75iUsa9D22evtj4Jb7bIAe/8/7SvRzSR71FyDC+a6o4v33+1L1Au1Y+Oo4/vk5gBr+kZ8y9BkLpvUBOSr6A7Te/6xHRvfszFT4XBVm+oKQFvxUay705nTW+GrZjvuS7pr7PXdK9Pcz/vnVhar556QS/jNTmvQnDdr5+A3W+omg2v2Sz8L0KlWc8nc2BvqMUBL8tH/C9TFugvh4Wh74CjzW/SPP8vc1mdL1HWY6+lQFnv/Nk/71RPEQ+yJaXvvWoNL9/i/e9B/EMvr3Qnr7MUgK/vS79vWmH7r5AB6S+7MYzv+ghCL5LuVu+KzirvhUvZb/khgy+SckIPQNjtL72SIu/ztcLvlxFjz6Rh7++Gvujv7YcBr5myQY/5aXMvrPNir+OqPa9LkpIPpjA177ciKO/l6Xuvfpe5T7I1eS+1Ua8vxJM3L1tqDM/sOXzvgkio79Ijb+9KSrBPlN5AL937Lu/RRmwvaqZIz+q/Qe/GM+ivzTslb02JKQ+1IAOv1Kku7+Wyoi9gN0WP0gCFr8bjqK/S05hvat/jT7Yghy/G3yJv4KqSr2YxQe9sAIiv4Jdor+pYU29GF55Pk+BKL+qPru/nW45vUQdBT+y/i+/BDGiv+PVDr07gFo+iXs2v4Enib/6tfq8WoS4vf/3O791EqK/cbwEvRtnRT6dc0K/BAqJv0Xj6bzwLeG95e5Hv/71ob/x5vu8b8ExPmBpTr9E7oi/E3bfvG+4A76M41O/4Nqhv1iJ9LxYDB8+8Vxav4vTiL+6Ftu8ryUWvgvWX79rwKG/whzzvInMDD5hTma/2Ku6v6KV3LyIYtc+5MVtv++lob9NqZe88/30PSs9dL/Xlrq/3g+EvPwU0D7Ys3u/t5Whv5XzArwqmN49PBWBvzyYiL86rL67NPs+vprQg79Aj6G/TXMcvF+u1T3JC4e/eoS6v9eF9LtCuck+wcaKv0eHob+A79o5Sa7KPcgBjr87gbq/KRUdO1+XyD6vvJG/GXzTvy
umJzxrai8/ffeVvwyHur8xF8Q81aLKPoKymb+vmKG/kncCPcPH4j3i7Zy/6aC6v82JCz0Gm9M+bKmgv4G3ob8qZS09p7IGPmnlo79u0oi/xis4PRgIF77woaa/YN1fv6YWLD3G79m+CN+ovxj7iL/3Nwk9B9P1vWCcq78KBKK/csX+PBxsOz7l2a6/TxmJvyJhDj1TJsy915exv9pgYL+kNgY9hSzDvkDWs7/qOIm/ovjNPBt/oL3UlLa/Lj6iv6ohwTzqeGM+g9O5v7lPib/uhuU8/QyBvYySvL9EVqK/+DPbPPoYdD620b+/NVy7vxohAT2xFAo//5DDv21xor+vUC09XnqDPrTQxr8dfru/BlpCPQ8BED+rkMq/MZuiv9hucD1i/5E+NtHNv2m+ib905YM9uiQ9PHWS0L9m1KK/gV6EPULNpT4m1NO/qfqJvyCikT1821U9mpbWvxlJYr+ZxZM9wm1evuTZ2L/JnTC/7t+KPYD7+L4Hntq/f8tiv4rpbT29LzG+oOLcv3sRMb/CvF89fcjkvuun3r+8NGO/zSE7PZPADL6R7eC/j6iKvzLfLz0+WS0+f7Pjv4yHY79gvT09+Frgvfn55b8/0oq/+sM0PXMeSj69wOi/v9+jv1/vRD1t7wA/xgfsvzz9ir+/MW49CPBnPmbP7r9zQGS/6l+APZuuQb25F/G/gzWLvy3gfD0nbIc+eeDzv6m0ZL+LRYk9LJUEvPUp9r9icYu/sPCIPRYknD7o8/i/qzFlv3RulT2cpgs9pD77v26HM7/205Y9Y1Zwvj0K/b+q3QG/6DaNPdjWAL+yVv6/HFygvk0zcT1tAUa/lIwFbnVtcHmUjAVkdHlwZZSTlIwCZjSUSwBLAYeUUpQoSwOMATyUTk5OSv////9K/////0sAdJRiS8hLBIaUjAFDlHSUUpQuAAAAAA==", "actions": [1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0], "rewards": [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0], "prev_actions": [0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1], "prev_rewards": [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0], "dones": [false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, true, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false], "new_obs": 
"BCJNGGhAFg0AAAAAAABQFg0AgIAFlQsNAAAAAAAAjCByYXkuY2xvdWRwaWNrbGUuY2xvdWRwaWNrbGVfZmFzdJSMEV9udW1weV9mcm9tYnVmZmVylJOUKJaADAAAAAAAAJmvIr3+s2I8EQJfvIWQ3b1rjSG9+245vq06gbz9fjg+GGMwvdCUaTxva0e8O4fwvR04L72ClFY+eOdtvGTw076DDR69FdNvPMjFurz05AC+idocveMeVz5LZc+8Q/DZvt2kC70lE3s8aZEKvYBzEL59Ywq9qpk3vsUfFr00IhA+nxMZvaDGhTzqlwq9+i0nvii9F70Rkja+wPcXvZPO8j00WCa9VBCOPGhBDr21DD6+hewkvVmFNb6gdR29LnjEPRFyM73MXr6+x5kVvfSiwD6i51G9rmw0vsWO7bwoA5Q9ulZgvVXlvb56t+G83xm2Pty4fr3SmDO+wnGnvITrXj2Bi4a9wgOkPA6Hnrw3gnq+kbmFvb4DM77km8a8sokrPa3ijL2jGak8WL+/vN5GhL46Coy9O1EyvnYT6rwhAdw8MiyTvXDYvL4KreW8O+uePsBHor0dfTG+Z9KyvG5oEzw8Yam9rHu8vgpZsbyz5JY+XnW4vbAcEL/bD4G8DpsUPzOEz73jNby+NdGHu47WkD7Akt69S44wvl5NxjpZ3jW8rqLlvQMxvL4INKk6G2qQPtiw9L3OmDC+vibjO/BgJ7wywfu9Cdi4PMl03DvW9Zm+mNT6vQPMML6WGDs6EozBu3/zAL5fTry+5SAcOtrxkj7Bewi+QRsQv/aazzuAchQ/DQMUvjJbvL5X6JI88hCUPtKLG76RLDG+7EnCPFFgETv0Fh++AjKyPPemwjy+1pC+6KQevn7eMb7ETZQ8a9iMPJkzIr6u97y+5R6XPHeNoT6gwim+c2cyvknRyjwZTus8DVQtvh5Ivb4Mhs88poGoPkzmNL4UIzO+DLkCPTxhNj17ezi+QXqhPNVeBj3+oHO+IhQ4vox+Wz6gwuU8tiMFv1KwM777X5o8DI2QPPbvX76GTTO+xtZaPiZxWTwVeQG/Ee0uvpI50T7H3U47VFJLv5uOJr7GmVo++ohQvBgkAL9eLyK+5NqXPBxHurzi/Vi+Ls4hvo97NL4X/9y8sZCWPUFqJb7GqJ08hPPQvI0Dab5aBSW+OOBbPsM79rw1Nge/lp8gvq8ipDxtYia9zf16voo2IL6jqzK+u3Y6vexEDT1VySO+iOO8vm+jN73w+58+jlcrvllZMb58Ch69hE7EO5XjLr53SLy+2YwdvfePkj6Zaza+xTgwvqcZBr0c+ZW82/E5vgNiwDyVmQe9H2ykvrt2Ob42Qi++U+ghvd0NIL0O+Dy+/zq7vs0bJb3NqHY+S3VEvs1pD783YBG9qEQFP2buT76Crbq+xnXNvPkhXj76ZVe+YDEtvjfrqbwpIqu9utxavvJXur4HnLe8vl1PPuJQYr66jyy+T26WvOv8xr1mxGW+RxDcPJVZpryqicq+jzdlvvf+K756Kee8Ef3fvS2oaL4MsLm+wxT5vDR2Mj6eFXC+gyMrvviG3Lwh6AK+2YFzvuhEub7pePG8G/sfPgHrer6DeQ6/GODXvGEC4T5xKIO+cNy4vlXfj7z57A0+7tqGvg6qKb4cVHK85GYjvkaNiL7kn7i+/06TvHN9Az6NPoy+by8pvlWKfLx2+C2+qu+Nvme89jwAG5q83E7vvjq1pLyVOVk+D104vdzspr6y84G80taQPFASU72BE0S9PRx+vEygWj45/la9THS2vmImOLww/pw8jS90vQE5pb3G3jG8jkEzvm/Ler3xPUU+bztrvK0bqzzrA2u9aCrzvUljZLxOhzG+7710vaAJHz42mY68KzS8vtgEaL1HbNs+3tLKvHzUL75H6US9+NXyPd305rxpKcU8pDI7vTlqQb5lA+O87HUuvserSr0DW7Y9Tu3+vHNE0Dx1YEO95BVgvvnC+ryKgGI+uk1VvUizBr9zhda8quLbPDA0gL0QLYC+oh/SvGlZK748dYq9M9yzPBqK7bzpBus8A4+JvSUhlb7D1ui8HGIpvi99lb0i9ae8WvgBvXcFuL4rVJa94j5+Pt1pH71NPye+siiMvRFuiL0Xyyy9xP22vjfjjr1SmlA+aRJKvQw7Jb4fi4a9R4/hvVZKV7174w09/Q2LvWF52L7dc1S9qkgjvmFfnL007Ru+64Nhve8zFj0RnKK98KHvvuGCXr3cBCG+vse1vTw6Tr6MZGu9V7WzvoMHvr1BC349sxKEvTZiHr4pfbu9X1GEvo1oir1AhCo9BxPGvYoaFL8HtIi9RqkbvlvF3b1ouKK+/+2OvWzdsL7eyeq9TnV6vTEUnb1Q6wm/CkvtvRjrRT5dJbO9yWY7v1tg5b09AuU+VCHRvcHnbL9CDtO9AeIzP/gI971IoTq/Qka2vT5Bwj6rcgq+0GIIv+u7pr1T63U92lsVvrhSrL5dRqS9/KyDvnFAHL6jxA++Fc+uvaL0Er+IIB++wR+rvmJSxr0PbZ6+2PglvtsgB7/z/tK9HNJHvUXIML5rqji/ff7UvUC7Vj46jj++TmAGv6RnzL0GQum9QE5KvoDtN7/rEdG9+zMVPhcFWb6gpAW/FRrLvTmdNb4atmO+5Lumvs9d0r09zP++dWFqvnnpBL+M1Oa9CcN2vn4Ddb6iaDa/ZLPwvQqVZzydzYG+oxQEvy0f8L1MW6C+HhaHvgKPNb9I8/y9zWZ0vUdZjr6VAWe/82T/vVE8RD7Ilpe+9ag0v3+L970H8Qy+vdCevsxSAr+9Lv29aYfuvkAHpL7sxjO/6CEIvku5W74rOKu+FS9lv+SGDL5JyQg9A2O0vvZIi7/O1wu+XEWPPpGHv74a+6O/thwGvmbJBj/lpcy+s82Kv46o9r0uSkg+mMDXvtyIo7+Xpe69+l7lPsjV5L7VRry/EkzcvW2oMz+w5fO+CSKjv0iNv70pKsE+U3kAv3fsu79FGbC9qpkjP6r9B78Yz6K/NOyVvTYkpD7UgA6/UqS7v5bKiL2A3RY/SAIWvxuOor9LTmG9q3+NPtiCHL8bfIm/gqpKvZjFB72wAiK/gl2iv6lhTb0YXnk+T4Eov6o+u7+dbjm9RB0FP7L+L78EMaK/49UOvTuAWj6Jeza/gSeJv/q1+rxahLi9//c7v3USor9xvAS9G2dFPp1zQr8ECom/RePpvPAt4b3l7ke//vWhv/Hm+7xvwTE+YGlOv0TuiL8Tdt+8b7gDvozjU7/g2qG/WIn0vFgMHz7xXFq/i9OIv7oW27yvJRa+C9Zfv2vAob/CHPO8icwMPmFOZr/Yq7q/opXcvIhi1z7kxW2/76Whv02pl7zz/fQ9Kz10v9eWur/eD4S8/BTQPtize7+3laG/lfMCvCqY3j08FYG/PJiIvzqsvrs0+z6+mtCDv0CPob9Ncxy8X67VPckLh796hLq/14X0u0K5yT7Bxoq/R4ehv4Dv2jlJrso9yAGOvzuBur8pFR07X5fIPq+8kb8ZfNO/K6YnPGtqLz9995W/DIe6vz
EXxDzVoso+grKZv6+Yob+SdwI9w8fiPeLtnL/poLq/zYkLPQab0z5sqaC/gbehvyplLT2nsgY+aeWjv27SiL/GKzg9GAgXvvChpr9g3V+/phYsPcbv2b4I36i/GPuIv/c3CT0H0/W9YJyrvwoEor9yxf48HGw7PuXZrr9PGYm/ImEOPVMmzL3Xl7G/2mBgv6Q2Bj2FLMO+QNazv+o4ib+i+M08G3+gvdSUtr8uPqK/qiHBPOp4Yz6D07m/uU+Jv+6G5Tz9DIG9jJK8v0RWor/4M9s8+hh0PrbRv781XLu/GiEBPbEUCj//kMO/bXGiv69QLT1eeoM+tNDGvx1+u78GWkI9DwEQP6uQyr8xm6K/2G5wPWL/kT420c2/ab6Jv3Tlgz26JD08dZLQv2bUor+BXoQ9Qs2lPibU07+p+om/IKKRPXzbVT2alta/GUliv5nFkz3CbV6+5NnYv8mdML/u34o9gPv4vgee2r9/y2K/iultPb0vMb6g4ty/exExv8K8Xz19yOS+66fev7w0Y7/NITs9k8AMvpHt4L+PqIq/Mt8vPT5ZLT5/s+O/jIdjv2C9PT34WuC9+fnlvz/Sir/6wzQ9cx5KPr3A6L+/36O/X+9EPW3vAD/GB+y/PP2Kv78xbj0I8Gc+Zs/uv3NAZL/qX4A9m65BvbkX8b+DNYu/LeB8PSdshz554PO/qbRkv4tFiT0slQS89Sn2v2Jxi7+w8Ig9FiScPujz+L+rMWW/dG6VPZymCz2kPvu/boczv/bTlj1jVnC+PQr9v6rdAb/oNo092NYAv7JW/r8cXKC+TTNxPW0BRr/0I/+/SFMCv6/WMT0G+uy+lIwFbnVtcHmUjAVkdHlwZZSTlIwCZjSUSwBLAYeUUpQoSwOMATyUTk5OSv////9K/////0sAdJRiS8hLBIaUjAFDlHSUUpQuAAAAAA==", "action_prob": [0.7106685638427734, 0.6668575406074524, 0.7000078558921814, 0.32227280735969543, 0.8854774832725525, 0.291730672121048, 0.8958092927932739, 0.7450428009033203, 0.5966638326644897, 0.7699317336082458, 0.5571064352989197, 0.7931985855102539, 0.48584210872650146, 0.8177021145820618, 0.5065338611602783, 0.8101471066474915, 0.48160436749458313, 0.8255948424339294, 0.4523012936115265, 0.838689386844635, 0.5816727876663208, 0.7651889324188232, 0.5962967872619629, 0.2425028383731842, 0.9109862446784973, 0.77065509557724, 0.5777051448822021, 0.7786453366279602, 0.43751171231269836, 0.8378877639770508, 0.5662569403648376, 0.21439701318740845, 0.9209125638008118, 0.8097880482673645, 0.5025780200958252, 0.8031399250030518, 0.4809136688709259, 0.8341689109802246, 0.4441937804222107, 0.8504447937011719, 0.5996227860450745, 0.2603808343410492, 0.8962920308113098, 0.261137455701828, 0.10159697383642197, 0.9471202492713928, 0.9088575839996338, 0.7872995138168335, 0.5310128927230835, 0.19528867304325104, 0.9223884344100952, 0.8343828320503235, 0.5757730007171631, 0.7634307742118835, 0.6077878475189209, 0.7426372766494751, 0.3664761483669281, 0.8727752566337585, 0.6759410500526428, 0.318977028131485, 0.8840462565422058, 0.6751586198806763, 0.7008159160614014, 0.6605274081230164, 0.28753361105918884, 0.8947510719299316, 0.7407499551773071, 0.605197548866272, 0.7575953602790833, 0.4200104773044586, 0.8475610017776489, 0.5779494643211365, 0.7699623703956604, 0.5632368326187134, 0.22127433121204376, 0.422776460647583, 0.8589866757392883, 0.35968849062919617, 0.880551278591156, 0.7051267623901367, 0.6363570690155029, 0.7504859566688538, 0.4244270622730255, 0.839689314365387, 0.5360040068626404, 0.8068986535072327, 0.48021307587623596, 0.16773734986782074, 0.9297579526901245, 0.8628906011581421, 0.3338986337184906, 0.8841833472251892, 0.7301704287528992, 0.590936541557312, 0.7732299566268921, 0.5246951580047607, 0.19215992093086243, 0.9209063649177551, 0.15553231537342072, 0.9304401874542236, 0.8749923706054688, 0.2799084186553955, 0.10450006276369095, 0.9438902139663696, 0.913950502872467, 0.8364750742912292, 0.6517844796180725, 0.37332117557525635, 0.831131100654602, 0.5788437724113464, 0.25698432326316833, 0.10453684628009796, 0.9427295327186584, 0.9109273552894592, 0.8311253190040588, 0.34625235199928284, 0.8574703931808472, 0.28901779651641846, 0.12165766209363937, 0.936893880367279, 0.8978785276412964, 0.18978732824325562, 0.9121976494789124, 0.844907820224762, 0.2898705303668976, 0.13065455853939056, 0.9321234822273254, 0.8911815285682678, 0.8097609281539917, 
0.6649650931358337, 0.538287878036499, 0.7086115479469299, 0.51210618019104, 0.6975406408309937, 0.5411106944084167, 0.6807903051376343, 0.5588160753250122, 0.6732602119445801, 0.4336960017681122, 0.7775503993034363, 0.5796176791191101, 0.6648098826408386, 0.4240368902683258, 0.784044623374939, 0.4207342863082886, 0.7867113351821899, 0.4180883765220642, 0.7887042164802551, 0.4156350791454315, 0.7902842164039612, 0.5870823264122009, 0.6664329767227173, 0.5750594735145569, 0.6797168254852295, 0.44329673051834106, 0.7674208283424377, 0.5479292869567871, 0.7015162706375122, 0.525517463684082, 0.28095874190330505, 0.8653427958488464, 0.7485175132751465, 0.449240505695343, 0.7724223732948303, 0.5910126566886902, 0.3788011968135834, 0.7832255363464355, 0.5994870662689209, 0.635342538356781, 0.4285106658935547, 0.7436121702194214, 0.5554605722427368, 0.6636852025985718, 0.5333572626113892, 0.3222014307975769, 0.830599308013916, 0.3011248707771301, 0.8411584496498108, 0.7221211194992065, 0.4397510886192322, 0.7416161894798279, 0.590021550655365, 0.4335390627384186, 0.6965445280075073, 0.45166945457458496, 0.6808656454086304, 0.5349255800247192, 0.6335983872413635, 0.5182574391365051, 0.35444557666778564, 0.7886951565742493, 0.6626697778701782, 0.4773338735103607, 0.6776759624481201, 0.4576045274734497, 0.6930818557739258, 0.56284099817276, 0.4418734610080719, 0.34181904792785645, 0.7381816506385803], "advantages": [-14.484781265258789, -15.078315734863281, -15.304299354553223, -15.869322776794434, -15.188665390014648, -16.362459182739258, -15.527762413024902, -16.737367630004883, -16.96587562561035, -17.3212833404541, -17.5750675201416, -17.840639114379883, -18.11956214904785, -17.680397033691406, -18.96535301208496, -18.61017608642578, -19.816015243530273, -19.9058837890625, -20.327102661132812, -20.334800720214844, -20.787870407104492, -20.59699249267578, -21.577484130859375, -21.455488204956055, -21.033933639526367, -22.681228637695312, -23.63094711303711, -23.64626121520996, -24.598915100097656, -24.57905387878418, -25.287006378173828, -25.391054153442383, -25.24933624267578, -26.800457000732422, -27.81361961364746, -27.914308547973633, -28.73761558532715, -28.953563690185547, -30.030853271484375, -30.27353858947754, -31.412626266479492, -31.670278549194336, -30.77577018737793, -32.50199508666992, -31.50899314880371, -28.935720443725586, -31.675037384033203, -33.52141571044922, -34.59233474731445, -34.51129150390625, -33.061500549316406, -34.97571563720703, -36.23207473754883, -37.01788330078125, -37.580421447753906, -38.42612075805664, -38.92631912231445, -38.43696594238281, -39.8529052734375, -40.92127990722656, -41.733375549316406, -42.64072036743164, -42.90554428100586, -44.06563949584961, -44.26015090942383, -43.642452239990234, -45.19123840332031, -46.599796295166016, -46.51935577392578, -48.01278305053711, -49.343505859375, -49.771995544433594, -49.62903594970703, -51.206783294677734, -50.992889404296875, 8.386072158813477, 9.242635726928711, 8.318897247314453, 9.362587928771973, 8.40546703338623, 8.517806053161621, 8.303901672363281, 8.395092964172363, 9.29047966003418, 8.003610610961914, 8.021709442138672, 8.034357070922852, 8.190627098083496, 10.05794906616211, 8.954859733581543, 8.935110092163086, 9.549631118774414, 9.4678955078125, 10.095630645751953, 9.704157829284668, 10.212653160095215, 10.010623931884766, 11.269890785217285, 10.831707000732422, 12.433157920837402, 11.796772956848145, 11.732799530029297, 12.439088821411133, 14.711362838745117, 13.701303482055664, 
13.11536693572998, 12.889432907104492, 12.967811584472656, 13.286262512207031, 12.04516887664795, 11.949445724487305, 13.052851676940918, 15.059213638305664, 14.138010025024414, 13.177241325378418, 12.223518371582031, 13.554078102111816, 12.447104454040527, 14.100028991699219, 16.052907943725586, 15.255127906799316, 14.207146644592285, 16.08226776123047, 15.220738410949707, 13.967734336853027, 15.876638412475586, 17.559951782226562, 17.040679931640625, 16.304439544677734, 15.002102851867676, 13.089179039001465, 15.41972541809082, 13.686527252197266, 11.47396183013916, 13.960506439208984, 11.804299354553223, 14.220174789428711, 12.197535514831543, 14.398192405700684, 15.626882553100586, 14.757031440734863, 13.119203567504883, 14.638959884643555, 15.24930477142334, 14.611503601074219, 14.963932037353516, 14.400782585144043, 14.551420211791992, 14.040635108947754, 14.042128562927246, 13.567328453063965, 12.871500015258789, 12.814275741577148, 12.241110801696777, 12.012792587280273, 11.757238388061523, 11.36470890045166, 10.993191719055176, 10.500683784484863, 10.238116264343262, 9.84238052368164, 9.327629089355469, 8.593448638916016, 8.625150680541992, 7.771075248718262, 7.172928810119629, 7.142759323120117, 6.691532135009766, 6.794996738433838, 6.0212016105651855, 5.939462184906006, 5.643169403076172, 6.004397392272949, 5.040994167327881, 5.55625057220459, 6.211331844329834, 4.9173583984375, 5.666738510131836, 4.396872043609619, 3.1750826835632324, 4.2400221824646, 2.9801526069641113, 2.3265228271484375, 2.5976719856262207, 2.6909706592559814, 2.7249648571014404, 3.0128118991851807, 3.991132974624634, 2.8632235527038574, 3.8758416175842285, 4.41290807723999, 3.267967700958252, 2.0713136196136475, 3.038623809814453, 1.8079873323440552, 2.6710569858551025, 1.430898904800415, 0.41751307249069214, -0.09992370754480362, -0.17169952392578125]}
+{"type": "SampleBatch", "eps_id": [90188607, 90188607, 90188607, 90188607, 90188607, 90188607, 90188607, 90188607, 90188607, 90188607, 90188607, 90188607, 90188607, 90188607, 90188607, 90188607, 90188607, 90188607, 90188607, 90188607, 90188607, 90188607, 90188607, 90188607, 90188607, 90188607, 90188607, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797, 1996087797], "obs": 
"BCJNGGhAFg0AAAAAAABQFg0AgIAFlQsNAAAAAAAAjCByYXkuY2xvdWRwaWNrbGUuY2xvdWRwaWNrbGVfZmFzdJSMEV9udW1weV9mcm9tYnVmZmVylJOUKJaADAAAAAAAAPQj/79IUwK/r9YxPQb67L7LOADAMG00vx/sCz35jyC+vR8BwKJ/Zr+bJ/48GvoVPsdGAsAeqjS/VRMLPXSLC74HLgPAZrxmv+nS/zw97io+X1UEwBLnNL8dlg09/wvtve08BcAcFQO/wRoEPUFuy7625AXAEYGivngcxzy6/i2/t0wGwIUd+70/gi88E/B2v+R0BsANwaK+VJIMvIcnK78O3QbAGyr7vRrTs7wGxXa/PgUHwAKKor7r4Ci98KAtv0RtB8BwEQO/lHBgvXIxzL4JFQjAUM80vyqOgL19vv29ePwIwGGEZr9WoYW9LOYXPogjCsBwG4y/xht/vb/o1j41igvAjPakvxi5XL1vMTE/gzANwIDWvb9wBSS9yqd3P38WD8Dly6S/AoupvAqUKT9gvBDA6MiLvzYN9LvoALo+OSISwGuXZb9S8j65G8+LPRpIE8BhxYu/UxabOsjGuD7qrRTAWphlv5KkCTy2dIw9zNMVwNjJi7+lHSA8k1G6Pqg5F8BeqWW/DK6LPBw0mD2fXxjAJcYzvy3blzwbDFq+vEUZwFLkAb/q72k8qdr/vkdyDL2+2Am9F/uHvLPZuLsNNA+9OZUlPrPniLxLZpu+6vQBvW3lB70Jorq820qEvLSsBL1eKCY+X0e9vNjBob6Ww+68xjUFvYUK8bz03/q8qBf0vLapaL4AD/a88ViBPsSoDL24ugG93KrMvDc6Sr37QA+9htxnvqvB1LwB+XA+gM0hvTZ+/bxtM668KwOGvXJWJL16bCg+AOy4vK3Aur4g3Ra9zlz4vMeu9LyxWKK971gZvSRmZr7R1QC9sbtQPoHHK722L/G87EXgvJ3syb3xMC69FQwqPlVt8Lwiscy+YJYgvWqN6rzZ9hi9a5juvdXuIr1ijmS+EIIivQAcKD6pNzW96+DVvi8PFb0EXuM+IXBXveVxY75xXOG8+IIPPjOiab3xatW+NWbKvAYa2T7l44W9aa5ivj3thLykQP09HPWOvUYf1b5BVWG89YXSPtoBoL0ydhy/PjK1uwYSMz+GCrm9YPvUvqucCjwGZs8+ZRTKvWtIYr5srIc8oarrPYgh073SKdW+34aaPPtr0z4eL+S9/8xivowu3jxWSQE+jkHtvfw727wf3vI8uYghvi1a7r11oWO+rwXZPP6YEz4ddfe9gMvhvEij8Dypbg++Ipb4vSojKz5MsNk8ecDYvq+98b3/Vui8/FOUPH+u+r0V5/K99n0qPgdGgDxGktG+PhXsvdlK7LzB1/Q7e9jkvbJD7b1rKCo+uJyrO3Pazb5Idea9AsjtvJbCN7thoNy9pKXnvX18Zb4Ee6K7o3w8PpTT8L3sm9a+T2WnukpL8z5j/wC+E2VlvqTIBjwPeDo+45UFvt2o1r4udEI8Omn0PgEsDr4hsGW+PnCvPCX4QD4BxBK+taDxvERQzjzoe8e9pl4TvgI5KT7VWr48Q5rDvjr8D75KV7g+JIZ/PGDUKr+TnAi+magoPspyEzv3T72+Cz0FvoNu+bx8mKi7OmKcva7cBb5u6Ga+b6Pau0LZWz7uegq+p07XvtzeG7soWQE/rRcTvn7GZr5BMv07U+tYPj61F74Brfi8JwNEPFiQoL1lVBi+GBBnvnVSKjwNRF8+cPMcvjdL+7xlxHE8UiGSvUSUHb5iLCg+52JaPOL7t744Nxq+DeS3PvRFyTvS1SW/LdwSvsPjJz7SQ9+7adS0vpWAD765hf68Dl1jvP9RgL16IxC+5Wlnvg3ld7zoB2c+UMQUvjR/174B9y2893MDPwBjHb6VCWe+qI+2uaGyXj7qASK+1tH5vNMdgzt4Ppq9zKEivkEXZ75PhCM7FOBfPvxAJ76klPq84gnhO84Mlr1b4Se+QzpnvskFsTuY5GI+PYEsvoIE/Lz3HSE89CCOvYgiLb41c2e+X2AKPKjNZz6OwzG+ULDXvrONVDx+jgU/NWQ6vhPDZ77EwL886LtuPtQGP77bOgG9T/PlPDVHVb0+rD++8BInPldr3TyE6au+01Q8vqN0BL1MaKY8aRAOvV7+PL4CYyY+kLmgPIxLpL54qjm+TcsGvQNNWDwJ6bS8AVc6vvLoJT5+EFE8VAOfvowFN76VUAi9mpfWO8KdY7wHtDe+p/ppvtB8zTt9y48+AGI8vs0QCb3CxUI8UV4hvHIRPb6iXCU+jYs/PMX2mL7Lwjm+WXIKveFLuzvI/B27AXQ6vtobJT5ut7k7aymWvqYmN74KHQu9u7JPuVezmjq22De+iQclPjPyNrkHSJW+w4s0vnQXC70VzMS7BUyLOs09Nb4zfWq+yBnEu61slT5i7jm+qmMKvXvLGrlCOzK7hp86vrA1JT4z1FO5G0WXvqZRN75sXgq9xD7Iu6p1ObvDAji+i05qvosZyrsUapM+abI8vlGmCb3LjNa5I6Xbu5piPb4CZiU+D2sOuglamb7DEzq+CpgJvasX1ru4feW74sM6vnGAJT6qrtq7wX2avoN0N74H0gi9LTdQvBkCN7ylIzi+LtJpvi3gU7xiEY4+z9A8vplRB72L5/G77MCdvAR+Pb5+hmm+Vob+u/3Lij6qKUK+dW4Gvce6GbtC5cS8vdVCvjs7Jj6eOzm7wImivqKCP77vIAa9JVUWvIxD0rxRLkC+/mcmPj++HrxaeKS+Udo8vu4stz6EAIS8p/Adv5qGNb7IxyY+ahXpvKKlqL6vMDK+gWQCvX2GD73QpTu9ltcyvu2zJz4/RxO969yyvvN8L77bevy8eeUvveyxi72JHjC+VbVmvvN7Nb3arFc+w7s0vlpG8rzsOiS9OgDEvdJWNb5demW++BEsvXB7PD6/7Tm+OVLWvtv9HL0vKe0+ZYBCvg9OZL5+F+68DYAiPlERR7741dW+fBfUvHFW4j7/nk++Z4Bjvuepi7w1uhA+zStUvo+b27y9A2m89W8gvlq4VL44jyw+Yi2OvB9a6L7ZRFG+EevXvKmH2Lxspiq+Cc9RvrBlYr591fO80szwPTBWVr5Z4NS+55HgvK8rzT4L2l6+lY9hvlPqnrwt1Ms96lxjvqPQy7zpm468sf5LvlvfY74XlC4+ij+vvPSi/r6EYWC+mEnHvK9dAL0bjFi+D+FgvjI+YL6SsBG9u72RPS5dZb6jer+8L9wLvYEbbr6612W+JVowPp/oHr3/KAm/zlBivmz7trzIzEq9O9qCvurFYr5W612+f7xfvT/LrDwkNme+qnDSviUCXr1clJc+DKFvviZUXL5zwUW9hpNYvCEJdL4lstG+qtZGvTIXhz5qbHy+uulavls5Mb2NKjO9oGaAvi0H0b6vzjS9IZ9wPtmUhL4CTBq/wY4hvYtvAz/ZwIq+K2vQvhX/7rxnlFU+9OuOvi+eWL7b0sy8ic6+vX8Wkb7/BdC+lBbcvLseRD6TP5W+89tXvo
[diff hunk elided: two large "SampleBatch" JSON records consisting of base64-encoded CartPole observation arrays ("obs"/"new_obs") plus per-step "eps_id", "actions", "rewards", "prev_actions", "prev_rewards", "dones", "action_prob", and "advantages" lists — offline sample data accompanying the MARWIL example changes below.]
diff --git a/rllib/tests/data/cartpole_small/output-2019-02-03_20-27-20_worker-0_0.json b/rllib/tests/data/cartpole/small.json
similarity index 100%
rename from rllib/tests/data/cartpole_small/output-2019-02-03_20-27-20_worker-0_0.json
rename to rllib/tests/data/cartpole/small.json
diff --git a/rllib/tuned_examples/marwil/cartpole-marwil.yaml b/rllib/tuned_examples/marwil/cartpole-marwil.yaml
index e7818816579eb..6e9643778a589 100644
--- a/rllib/tuned_examples/marwil/cartpole-marwil.yaml
+++ b/rllib/tuned_examples/marwil/cartpole-marwil.yaml
@@ -10,6 +10,14 @@ cartpole-marwil:
config:
# Works for both torch and tf.
framework: tf
+ # In order to evaluate on an actual environment, use the following
+ # settings:
+ evaluation_num_workers: 1
+ evaluation_interval: 1
+ evaluation_config:
+ input: sampler
+ # Compare IL (beta=0) vs MARWIL.
beta:
- grid_search: [0, 1] # compare IL (beta=0) vs MARWIL
+ grid_search: [0, 1]
+ # The historic (offline) data file from the PPO run (at the top).
input: /tmp/out
|
graspologic-org__graspologic-654 | [BUG] Possible issue with direct import
```
import graspologic
dir(graspologic)
```
returns
```
['__builtins__',
'__cached__',
'__doc__',
'__file__',
'__loader__',
'__name__',
'__package__',
'__path__',
'__spec__',
'__version',
'__version__',
'graspologic',
'layouts',
'models',
'partition',
'plot',
'preprocessing',
'subgraph',
'version']
```
and is missing lots of modules (align, cluster, datasets, embed, inference, match, nominate, pipeline, utils).
Is this intentional?
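
For context, this is standard Python import behavior rather than anything graspologic-specific: a submodule only shows up in `dir(package)` after it has been imported, typically eagerly in the package's `__init__.py`. A small sketch of the mechanism (assuming graspologic is installed; on the reported version `nominate` is absent from the eager imports):

```python
import graspologic

# Submodules appear in dir(graspologic) only once they have been imported.
print('nominate' in dir(graspologic))  # False on the reported version

# Importing the submodule directly still works, and it also binds the
# 'nominate' attribute on the parent package object.
import graspologic.nominate
print('nominate' in dir(graspologic))  # True
```

The fix in the diff below accordingly adds the missing `import graspologic.nominate` line to `graspologic/__init__.py`.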
| [
{
"content": "# Copyright (c) Microsoft Corporation and contributors.\n# Licensed under the MIT License.\n\nimport graspologic.align\nimport graspologic.cluster\nimport graspologic.datasets\nimport graspologic.embed\nimport graspologic.inference\nimport graspologic.layouts\nimport graspologic.models\nimport graspologic.partition\nimport graspologic.preprocessing\nimport graspologic.plot\nimport graspologic.simulations\nimport graspologic.subgraph\nimport graspologic.utils\n\nfrom graspologic.version import __version\n\n__version__ = __version()\n",
"path": "graspologic/__init__.py"
}
] | [
{
"content": "# Copyright (c) Microsoft Corporation and contributors.\n# Licensed under the MIT License.\n\nimport graspologic.align\nimport graspologic.cluster\nimport graspologic.datasets\nimport graspologic.embed\nimport graspologic.inference\nimport graspologic.layouts\nimport graspologic.models\nimport graspologic.nominate\nimport graspologic.partition\nimport graspologic.preprocessing\nimport graspologic.plot\nimport graspologic.simulations\nimport graspologic.subgraph\nimport graspologic.utils\n\nfrom graspologic.version import __version\n\n__version__ = __version()\n",
"path": "graspologic/__init__.py"
}
] | diff --git a/graspologic/__init__.py b/graspologic/__init__.py
index ad156e719..9d6596604 100644
--- a/graspologic/__init__.py
+++ b/graspologic/__init__.py
@@ -8,6 +8,7 @@
import graspologic.inference
import graspologic.layouts
import graspologic.models
+import graspologic.nominate
import graspologic.partition
import graspologic.preprocessing
import graspologic.plot
|
archlinux__archinstall-1906 | Presence of ROM device raises `_ped.DiskException`
The list of devices returned by `parted.getAllDevices()` is iterated over and each device is passed to `parted.Disk()`. If that raises an 'unrecognised disk label' `DiskLabelException` then the device is passed to `parted.freshDisk()`. ROM devices are included in the list of devices returned by `parted.getAllDevices()` and will cause both of these calls to raise an exception.
https://github.com/archlinux/archinstall/blob/5276d95339368210e75791e2b88c1bf5aca4517b/archinstall/lib/disk/device_handler.py#L47-L52
### Proposed fix
Do not initialize `parted.Disk` or `parted.freshDisk` with ROM devices. Use `get_lsblk_info()` to get the type of a device and skip the device if the type is 'rom'. See the commit https://github.com/codefiles/archinstall/commit/08b963f563387d0e1c4341109a13e85449190022 for an implementation of this fix.
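A rough sketch of that guard inside `load_devices` (hedged: it assumes archinstall's `LsblkInfo` exposes the lsblk `TYPE` column as a `type` attribute and reuses the names already imported in `device_handler.py`). Note that the pyparted output below reports a length of just 1 sector for the drive, which is why the `freshDisk` fallback fails with 'device is too small for GPT':
```python
for device in getAllDevices():
	# Skip optical/ROM devices up front: parted.Disk() raises an
	# 'unrecognised disk label' DiskLabelException for them, and the
	# freshDisk() fallback then fails with 'device is too small for GPT'.
	if get_lsblk_info(device.path).type == 'rom':
		continue

	try:
		disk = Disk(device)
	except DiskLabelException as err:
		if 'unrecognised disk label' in str(err):
			disk = freshDisk(device, PartitionTable.GPT.value)
		else:
			continue
```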
### Traceback
```
Traceback (most recent call last):
File "/home/scripttest/archinstall/archinstall/lib/disk/device_handler.py", line 49, in load_devices
disk = Disk(device)
^^^^^^^^^^^^
File "/usr/lib/python3.11/site-packages/parted/decorators.py", line 42, in new
ret = fn(*args, **kwds)
^^^^^^^^^^^^^^^^^
File "/usr/lib/python3.11/site-packages/parted/disk.py", line 52, in __init__
self.__disk = _ped.Disk(device.getPedDevice())
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
_ped.DiskLabelException: /dev/sr0: unrecognised disk label
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/home/scripttest/archinstall/.venv/bin/archinstall", line 5, in <module>
from archinstall import run_as_a_module
File "/home/scripttest/archinstall/archinstall/__init__.py", line 8, in <module>
from .lib import disk
File "/home/scripttest/archinstall/archinstall/lib/disk/__init__.py", line 1, in <module>
from .device_handler import device_handler, disk_layouts
File "/home/scripttest/archinstall/archinstall/lib/disk/device_handler.py", line 603, in <module>
device_handler = DeviceHandler()
^^^^^^^^^^^^^^^
File "/home/scripttest/archinstall/archinstall/lib/disk/device_handler.py", line 38, in __init__
self.load_devices()
File "/home/scripttest/archinstall/archinstall/lib/disk/device_handler.py", line 52, in load_devices
disk = freshDisk(device, PartitionTable.GPT.value)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/lib/python3.11/site-packages/parted/decorators.py", line 42, in new
ret = fn(*args, **kwds)
^^^^^^^^^^^^^^^^^
File "/usr/lib/python3.11/site-packages/parted/__init__.py", line 546, in freshDisk
peddisk = disk_new_fresh(device.getPedDevice(), ty)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
_ped.DiskException: device is too small for GPT
```
### Device information
- `lsblk -o NAME,PATH,TYPE /dev/sr0`
```
NAME PATH TYPE
sr0 /dev/sr0 rom
```
- pyparted
```python
import parted
device = parted.getDevice('/dev/sr0')
print(device)
print(parted.devices[device.type])
```
**Output**
```
parted.Device instance --
model: QEMU QEMU DVD-ROM path: /dev/sr0 type: 1
sectorSize: 2048 physicalSectorSize: 2048
length: 1 openCount: 0 readOnly: False
externalMode: False dirty: False bootDirty: False
host: 1 did: 0 busy: False
hardwareGeometry: (0, 255, 5) biosGeometry: (0, 255, 5)
PedDevice: <_ped.Device object at 0x7f7652da5040>
scsi
```
| [
{
"content": "from __future__ import annotations\n\nimport json\nimport os\nimport time\nfrom pathlib import Path\nfrom typing import List, Dict, Any, Optional, TYPE_CHECKING\n\nfrom parted import ( # type: ignore\n\tDisk, Geometry, FileSystem,\n\tPartitionException, DiskLabelException,\n\tgetAllDevices, freshDisk, Partition, Device\n)\n\nfrom .device_model import (\n\tDeviceModification, PartitionModification,\n\tBDevice, _DeviceInfo, _PartitionInfo,\n\tFilesystemType, Unit, PartitionTable,\n\tModificationStatus, get_lsblk_info, LsblkInfo,\n\t_BtrfsSubvolumeInfo, get_all_lsblk_info, DiskEncryption\n)\n\nfrom ..exceptions import DiskError, UnknownFilesystemFormat\nfrom ..general import SysCommand, SysCallError, JSON\nfrom ..luks import Luks2\nfrom ..output import debug, error, info, warn\nfrom ..utils.util import is_subpath\n\nif TYPE_CHECKING:\n\t_: Any\n\n\nclass DeviceHandler(object):\n\t_TMP_BTRFS_MOUNT = Path('/mnt/arch_btrfs')\n\n\tdef __init__(self):\n\t\tself._devices: Dict[Path, BDevice] = {}\n\t\tself.load_devices()\n\n\t@property\n\tdef devices(self) -> List[BDevice]:\n\t\treturn list(self._devices.values())\n\n\tdef load_devices(self):\n\t\tblock_devices = {}\n\n\t\tfor device in getAllDevices():\n\t\t\ttry:\n\t\t\t\tdisk = Disk(device)\n\t\t\texcept DiskLabelException as err:\n\t\t\t\tif 'unrecognised disk label' in getattr(error, 'message', str(err)):\n\t\t\t\t\tdisk = freshDisk(device, PartitionTable.GPT.value)\n\t\t\t\telse:\n\t\t\t\t\tdebug(f'Unable to get disk from device: {device}')\n\t\t\t\t\tcontinue\n\n\t\t\tdevice_info = _DeviceInfo.from_disk(disk)\n\t\t\tpartition_infos = []\n\n\t\t\tfor partition in disk.partitions:\n\t\t\t\tlsblk_info = get_lsblk_info(partition.path)\n\t\t\t\tfs_type = self._determine_fs_type(partition, lsblk_info)\n\t\t\t\tsubvol_infos = []\n\n\t\t\t\tif fs_type == FilesystemType.Btrfs:\n\t\t\t\t\tsubvol_infos = self.get_btrfs_info(partition.path)\n\n\t\t\t\tpartition_infos.append(\n\t\t\t\t\t_PartitionInfo.from_partition(\n\t\t\t\t\t\tpartition,\n\t\t\t\t\t\tfs_type,\n\t\t\t\t\t\tlsblk_info.partuuid,\n\t\t\t\t\t\tlsblk_info.mountpoints,\n\t\t\t\t\t\tsubvol_infos\n\t\t\t\t\t)\n\t\t\t\t)\n\n\t\t\tblock_device = BDevice(disk, device_info, partition_infos)\n\t\t\tblock_devices[block_device.device_info.path] = block_device\n\n\t\tself._devices = block_devices\n\n\tdef _determine_fs_type(\n\t\tself,\n\t\tpartition: Partition,\n\t\tlsblk_info: Optional[LsblkInfo] = None\n\t) -> Optional[FilesystemType]:\n\t\ttry:\n\t\t\tif partition.fileSystem:\n\t\t\t\treturn FilesystemType(partition.fileSystem.type)\n\t\t\telif lsblk_info is not None:\n\t\t\t\treturn FilesystemType(lsblk_info.fstype) if lsblk_info.fstype else None\n\t\t\treturn None\n\t\texcept ValueError:\n\t\t\tdebug(f'Could not determine the filesystem: {partition.fileSystem}')\n\n\t\treturn None\n\n\tdef get_device(self, path: Path) -> Optional[BDevice]:\n\t\treturn self._devices.get(path, None)\n\n\tdef get_device_by_partition_path(self, partition_path: Path) -> Optional[BDevice]:\n\t\tpartition = self.find_partition(partition_path)\n\t\tif partition:\n\t\t\tdevice: Device = partition.disk.device\n\t\t\treturn self.get_device(Path(device.path))\n\t\treturn None\n\n\tdef find_partition(self, path: Path) -> Optional[_PartitionInfo]:\n\t\tfor device in self._devices.values():\n\t\t\tpart = next(filter(lambda x: str(x.path) == str(path), device.partition_infos), None)\n\t\t\tif part is not None:\n\t\t\t\treturn part\n\t\treturn None\n\n\tdef get_uuid_for_path(self, path: Path) -> 
Optional[str]:\n\t\tpartition = self.find_partition(path)\n\t\treturn partition.partuuid if partition else None\n\n\tdef get_btrfs_info(self, dev_path: Path) -> List[_BtrfsSubvolumeInfo]:\n\t\tlsblk_info = get_lsblk_info(dev_path)\n\t\tsubvol_infos: List[_BtrfsSubvolumeInfo] = []\n\n\t\tif not lsblk_info.mountpoint:\n\t\t\tself.mount(dev_path, self._TMP_BTRFS_MOUNT, create_target_mountpoint=True)\n\t\t\tmountpoint = self._TMP_BTRFS_MOUNT\n\t\telse:\n\t\t\t# when multiple subvolumes are mounted then the lsblk output may look like\n\t\t\t# \"mountpoint\": \"/mnt/archinstall/.snapshots\"\n\t\t\t# \"mountpoints\": [\"/mnt/archinstall/.snapshots\", \"/mnt/archinstall/home\", ..]\n\t\t\t# so we'll determine the minimum common path and assume that's the root\n\t\t\tpath_strings = [str(m) for m in lsblk_info.mountpoints]\n\t\t\tcommon_prefix = os.path.commonprefix(path_strings)\n\t\t\tmountpoint = Path(common_prefix)\n\n\t\ttry:\n\t\t\tresult = SysCommand(f'btrfs subvolume list {mountpoint}')\n\t\texcept SysCallError as err:\n\t\t\tdebug(f'Failed to read btrfs subvolume information: {err}')\n\t\t\treturn subvol_infos\n\n\t\ttry:\n\t\t\tif decoded := result.decode('utf-8'):\n\t\t\t\t# ID 256 gen 16 top level 5 path @\n\t\t\t\tfor line in decoded.splitlines():\n\t\t\t\t\t# expected output format:\n\t\t\t\t\t# ID 257 gen 8 top level 5 path @home\n\t\t\t\t\tname = Path(line.split(' ')[-1])\n\t\t\t\t\tsub_vol_mountpoint = lsblk_info.btrfs_subvol_info.get(name, None)\n\t\t\t\t\tsubvol_infos.append(_BtrfsSubvolumeInfo(name, sub_vol_mountpoint))\n\t\texcept json.decoder.JSONDecodeError as err:\n\t\t\terror(f\"Could not decode lsblk JSON: {result}\")\n\t\t\traise err\n\n\t\tif not lsblk_info.mountpoint:\n\t\t\tself.umount(dev_path)\n\n\t\treturn subvol_infos\n\n\tdef _perform_formatting(\n\t\tself,\n\t\tfs_type: FilesystemType,\n\t\tpath: Path,\n\t\tadditional_parted_options: List[str] = []\n\t):\n\t\toptions = []\n\t\tcommand = ''\n\n\t\tmatch fs_type:\n\t\t\tcase FilesystemType.Btrfs:\n\t\t\t\toptions += ['-f']\n\t\t\t\tcommand += 'mkfs.btrfs'\n\t\t\tcase FilesystemType.Fat16:\n\t\t\t\toptions += ['-F16']\n\t\t\t\tcommand += 'mkfs.fat'\n\t\t\tcase FilesystemType.Fat32:\n\t\t\t\toptions += ['-F32']\n\t\t\t\tcommand += 'mkfs.fat'\n\t\t\tcase FilesystemType.Ext2:\n\t\t\t\toptions += ['-F']\n\t\t\t\tcommand += 'mkfs.ext2'\n\t\t\tcase FilesystemType.Ext3:\n\t\t\t\toptions += ['-F']\n\t\t\t\tcommand += 'mkfs.ext3'\n\t\t\tcase FilesystemType.Ext4:\n\t\t\t\toptions += ['-F']\n\t\t\t\tcommand += 'mkfs.ext4'\n\t\t\tcase FilesystemType.Xfs:\n\t\t\t\toptions += ['-f']\n\t\t\t\tcommand += 'mkfs.xfs'\n\t\t\tcase FilesystemType.F2fs:\n\t\t\t\toptions += ['-f']\n\t\t\t\tcommand += 'mkfs.f2fs'\n\t\t\tcase FilesystemType.Ntfs:\n\t\t\t\toptions += ['-f', '-Q']\n\t\t\t\tcommand += 'mkfs.ntfs'\n\t\t\tcase FilesystemType.Reiserfs:\n\t\t\t\tcommand += 'mkfs.reiserfs'\n\t\t\tcase _:\n\t\t\t\traise UnknownFilesystemFormat(f'Filetype \"{fs_type.value}\" is not supported')\n\n\t\toptions += additional_parted_options\n\t\toptions_str = ' '.join(options)\n\n\t\tinfo(f'Formatting filesystem: /usr/bin/{command} {options_str} {path}')\n\n\t\ttry:\n\t\t\tSysCommand(f\"/usr/bin/{command} {options_str} {path}\")\n\t\texcept SysCallError as err:\n\t\t\tmsg = f'Could not format {path} with {fs_type.value}: {err.message}'\n\t\t\terror(msg)\n\t\t\traise DiskError(msg) from err\n\n\tdef _perform_enc_formatting(\n\t\tself,\n\t\tdev_path: Path,\n\t\tmapper_name: Optional[str],\n\t\tfs_type: FilesystemType,\n\t\tenc_conf: 
DiskEncryption\n\t):\n\t\tluks_handler = Luks2(\n\t\t\tdev_path,\n\t\t\tmapper_name=mapper_name,\n\t\t\tpassword=enc_conf.encryption_password\n\t\t)\n\n\t\tkey_file = luks_handler.encrypt()\n\n\t\tdebug(f'Unlocking luks2 device: {dev_path}')\n\t\tluks_handler.unlock(key_file=key_file)\n\n\t\tif not luks_handler.mapper_dev:\n\t\t\traise DiskError('Failed to unlock luks device')\n\n\t\tinfo(f'luks2 formatting mapper dev: {luks_handler.mapper_dev}')\n\t\tself._perform_formatting(fs_type, luks_handler.mapper_dev)\n\n\t\tinfo(f'luks2 locking device: {dev_path}')\n\t\tluks_handler.lock()\n\n\tdef _validate(self, device_mod: DeviceModification):\n\t\tchecks = {\n\t\t\t# verify that all partitions have a path set (which implies that they have been created)\n\t\t\tlambda x: x.dev_path is None: ValueError('When formatting, all partitions must have a path set'),\n\t\t\t# crypto luks is not a valid file system type\n\t\t\tlambda x: x.fs_type is FilesystemType.Crypto_luks: ValueError('Crypto luks cannot be set as a filesystem type'),\n\t\t\t# file system type must be set\n\t\t\tlambda x: x.fs_type is None: ValueError('File system type must be set for modification')\n\t\t}\n\n\t\tfor check, exc in checks.items():\n\t\t\tfound = next(filter(check, device_mod.partitions), None)\n\t\t\tif found is not None:\n\t\t\t\traise exc\n\n\tdef format(\n\t\tself,\n\t\tdevice_mod: DeviceModification,\n\t\tenc_conf: Optional['DiskEncryption'] = None\n\t):\n\t\t\"\"\"\n\t\tFormat can be given an overriding path, for instance /dev/null to test\n\t\tthe formatting functionality and in essence the support for the given filesystem.\n\t\t\"\"\"\n\t\tself._validate(device_mod)\n\n\t\t# make sure all devices are unmounted\n\t\tself._umount_all_existing(device_mod)\n\n\t\tfor part_mod in device_mod.partitions:\n\t\t\t# partition will be encrypted\n\t\t\tif enc_conf is not None and part_mod in enc_conf.partitions:\n\t\t\t\tself._perform_enc_formatting(\n\t\t\t\t\tpart_mod.safe_dev_path,\n\t\t\t\t\tpart_mod.mapper_name,\n\t\t\t\t\tpart_mod.safe_fs_type,\n\t\t\t\t\tenc_conf\n\t\t\t\t)\n\t\t\telse:\n\t\t\t\tself._perform_formatting(part_mod.safe_fs_type, part_mod.safe_dev_path)\n\n\tdef _perform_partitioning(\n\t\tself,\n\t\tpart_mod: PartitionModification,\n\t\tblock_device: BDevice,\n\t\tdisk: Disk,\n\t\trequires_delete: bool\n\t):\n\t\t# when we require a delete and the partition to be (re)created\n\t\t# already exists then we have to delete it first\n\t\tif requires_delete and part_mod.status in [ModificationStatus.Modify, ModificationStatus.Delete]:\n\t\t\tinfo(f'Delete existing partition: {part_mod.safe_dev_path}')\n\t\t\tpart_info = self.find_partition(part_mod.safe_dev_path)\n\n\t\t\tif not part_info:\n\t\t\t\traise DiskError(f'No partition for dev path found: {part_mod.safe_dev_path}')\n\n\t\t\tdisk.deletePartition(part_info.partition)\n\t\t\tdisk.commit()\n\n\t\tif part_mod.status == ModificationStatus.Delete:\n\t\t\treturn\n\n\t\tstart_sector = part_mod.start.convert(\n\t\t\tUnit.sectors,\n\t\t\tblock_device.device_info.sector_size\n\t\t)\n\n\t\tlength_sector = part_mod.length.convert(\n\t\t\tUnit.sectors,\n\t\t\tblock_device.device_info.sector_size\n\t\t)\n\n\t\tgeometry = Geometry(\n\t\t\tdevice=block_device.disk.device,\n\t\t\tstart=start_sector.value,\n\t\t\tlength=length_sector.value\n\t\t)\n\n\t\tfilesystem = FileSystem(type=part_mod.safe_fs_type.value, geometry=geometry)\n\n\t\tpartition = 
Partition(\n\t\t\tdisk=disk,\n\t\t\ttype=part_mod.type.get_partition_code(),\n\t\t\tfs=filesystem,\n\t\t\tgeometry=geometry\n\t\t)\n\n\t\tfor flag in part_mod.flags:\n\t\t\tpartition.setFlag(flag.value)\n\n\t\tdebug(f'\\tType: {part_mod.type.value}')\n\t\tdebug(f'\\tFilesystem: {part_mod.safe_fs_type.value}')\n\t\tdebug(f'\\tGeometry: {start_sector.value} start sector, {length_sector.value} length')\n\n\t\ttry:\n\t\t\tdisk.addPartition(partition=partition, constraint=disk.device.optimalAlignedConstraint)\n\t\t\tdisk.commit()\n\n\t\t\t# the creation will take a bit of time\n\t\t\ttime.sleep(3)\n\n\t\t\t# the partition has a real path now as it was created\n\t\t\tpart_mod.dev_path = Path(partition.path)\n\n\t\t\tlsblk_info = self._fetch_partuuid(part_mod.dev_path)\n\n\t\t\tpart_mod.partuuid = lsblk_info.partuuid\n\t\t\tpart_mod.uuid = lsblk_info.uuid\n\t\texcept PartitionException as ex:\n\t\t\traise DiskError(f'Unable to add partition, most likely due to overlapping sectors: {ex}') from ex\n\n\tdef _fetch_partuuid(self, path: Path) -> LsblkInfo:\n\t\tattempts = 3\n\t\tlsblk_info: Optional[LsblkInfo] = None\n\n\t\tself.partprobe(path)\n\t\tfor attempt_nr in range(attempts):\n\t\t\ttime.sleep(attempt_nr + 1)\n\t\t\tlsblk_info = get_lsblk_info(path)\n\n\t\t\tif lsblk_info.partuuid:\n\t\t\t\tbreak\n\n\t\t\tself.partprobe(path)\n\n\t\tif not lsblk_info or not lsblk_info.partuuid:\n\t\t\tdebug(f'Unable to determine new partition uuid: {path}\\n{lsblk_info}')\n\t\t\traise DiskError(f'Unable to determine new partition uuid: {path}')\n\n\t\tdebug(f'partuuid found: {lsblk_info.json()}')\n\n\t\treturn lsblk_info\n\n\tdef create_btrfs_volumes(\n\t\tself,\n\t\tpart_mod: PartitionModification,\n\t\tenc_conf: Optional['DiskEncryption'] = None\n\t):\n\t\tinfo(f'Creating subvolumes: {part_mod.safe_dev_path}')\n\n\t\tluks_handler = None\n\n\t\t# unlock the partition first if it's encrypted\n\t\tif enc_conf is not None and part_mod in enc_conf.partitions:\n\t\t\tif not part_mod.mapper_name:\n\t\t\t\traise ValueError('No device path specified for modification')\n\n\t\t\tluks_handler = self.unlock_luks2_dev(\n\t\t\t\tpart_mod.safe_dev_path,\n\t\t\t\tpart_mod.mapper_name,\n\t\t\t\tenc_conf.encryption_password\n\t\t\t)\n\n\t\t\tif not luks_handler.mapper_dev:\n\t\t\t\traise DiskError('Failed to unlock luks device')\n\n\t\t\tself.mount(luks_handler.mapper_dev, self._TMP_BTRFS_MOUNT, create_target_mountpoint=True)\n\t\telse:\n\t\t\tself.mount(part_mod.safe_dev_path, self._TMP_BTRFS_MOUNT, create_target_mountpoint=True)\n\n\t\tfor sub_vol in part_mod.btrfs_subvols:\n\t\t\tdebug(f'Creating subvolume: {sub_vol.name}')\n\n\t\t\tif luks_handler is not None:\n\t\t\t\tsubvol_path = self._TMP_BTRFS_MOUNT / sub_vol.name\n\t\t\telse:\n\t\t\t\tsubvol_path = self._TMP_BTRFS_MOUNT / sub_vol.name\n\n\t\t\tSysCommand(f\"btrfs subvolume create {subvol_path}\")\n\n\t\t\tif sub_vol.nodatacow:\n\t\t\t\ttry:\n\t\t\t\t\tSysCommand(f'chattr +C {subvol_path}')\n\t\t\t\texcept SysCallError as err:\n\t\t\t\t\traise DiskError(f'Could not set nodatacow attribute at {subvol_path}: {err}')\n\n\t\t\tif sub_vol.compress:\n\t\t\t\ttry:\n\t\t\t\t\tSysCommand(f'chattr +c {subvol_path}')\n\t\t\t\texcept SysCallError as err:\n\t\t\t\t\traise DiskError(f'Could not set compress attribute at {subvol_path}: {err}')\n\n\t\tif luks_handler is not None and luks_handler.mapper_dev is not None:\n\t\t\tself.umount(luks_handler.mapper_dev)\n\t\t\tluks_handler.lock()\n\t\telse:\n\t\t\tself.umount(part_mod.safe_dev_path)\n\n\tdef unlock_luks2_dev(self, dev_path: 
Path, mapper_name: str, enc_password: str) -> Luks2:\n\t\tluks_handler = Luks2(dev_path, mapper_name=mapper_name, password=enc_password)\n\n\t\tif not luks_handler.is_unlocked():\n\t\t\tluks_handler.unlock()\n\n\t\tif not luks_handler.is_unlocked():\n\t\t\traise DiskError(f'Failed to unlock luks2 device: {dev_path}')\n\n\t\treturn luks_handler\n\n\tdef _umount_all_existing(self, modification: DeviceModification):\n\t\tinfo(f'Unmounting all partitions: {modification.device_path}')\n\n\t\texisting_partitions = self._devices[modification.device_path].partition_infos\n\n\t\tfor partition in existing_partitions:\n\t\t\tdebug(f'Unmounting: {partition.path}')\n\n\t\t\t# un-mount for existing encrypted partitions\n\t\t\tif partition.fs_type == FilesystemType.Crypto_luks:\n\t\t\t\tLuks2(partition.path).lock()\n\t\t\telse:\n\t\t\t\tself.umount(partition.path, recursive=True)\n\n\tdef partition(\n\t\tself,\n\t\tmodification: DeviceModification,\n\t\tpartition_table: Optional[PartitionTable] = None\n\t):\n\t\t\"\"\"\n\t\tCreate a partition table on the block device and create all partitions.\n\t\t\"\"\"\n\t\tif modification.wipe:\n\t\t\tif partition_table is None:\n\t\t\t\traise ValueError('Modification is marked as wipe but no partitioning table was provided')\n\n\t\t\tif partition_table.MBR and len(modification.partitions) > 3:\n\t\t\t\traise DiskError('Too many partitions on disk, MBR disks can only have 3 primary partitions')\n\n\t\t# make sure all devices are unmounted\n\t\tself._umount_all_existing(modification)\n\n\t\t# WARNING: the entire device will be wiped and all data lost\n\t\tif modification.wipe:\n\t\t\tself.wipe_dev(modification.device)\n\t\t\tpart_table = partition_table.value if partition_table else None\n\t\t\tdisk = freshDisk(modification.device.disk.device, part_table)\n\t\telse:\n\t\t\tinfo(f'Use existing device: {modification.device_path}')\n\t\t\tdisk = modification.device.disk\n\n\t\tinfo(f'Creating partitions: {modification.device_path}')\n\n\t\t# TODO sort by delete first\n\n\t\tfor part_mod in modification.partitions:\n\t\t\t# don't touch existing partitions\n\t\t\tif part_mod.exists():\n\t\t\t\tcontinue\n\n\t\t\t# if the entire disk got nuked then we don't have to delete\n\t\t\t# any existing partitions anymore because they're all gone already\n\t\t\trequires_delete = modification.wipe is False\n\t\t\tself._perform_partitioning(part_mod, modification.device, disk, requires_delete=requires_delete)\n\n\t\tself.partprobe(modification.device.device_info.path)\n\n\tdef mount(\n\t\tself,\n\t\tdev_path: Path,\n\t\ttarget_mountpoint: Path,\n\t\tmount_fs: Optional[str] = None,\n\t\tcreate_target_mountpoint: bool = True,\n\t\toptions: List[str] = []\n\t):\n\t\tif create_target_mountpoint and not target_mountpoint.exists():\n\t\t\ttarget_mountpoint.mkdir(parents=True, exist_ok=True)\n\n\t\tif not target_mountpoint.exists():\n\t\t\traise ValueError('Target mountpoint does not exist')\n\n\t\tlsblk_info = get_lsblk_info(dev_path)\n\t\tif target_mountpoint in lsblk_info.mountpoints:\n\t\t\tinfo(f'Device already mounted at {target_mountpoint}')\n\t\t\treturn\n\n\t\tstr_options = ','.join(options)\n\t\tstr_options = f'-o {str_options}' if str_options else ''\n\n\t\tmount_fs = f'-t {mount_fs}' if mount_fs else ''\n\n\t\tcommand = f'mount {mount_fs} {str_options} {dev_path} {target_mountpoint}'\n\n\t\tdebug(f'Mounting {dev_path}: command')\n\n\t\ttry:\n\t\t\tSysCommand(command)\n\t\texcept SysCallError as err:\n\t\t\traise DiskError(f'Could not mount {dev_path}: 
{command}\\n{err.message}')\n\n\tdef umount(self, mountpoint: Path, recursive: bool = False):\n\t\ttry:\n\t\t\tlsblk_info = get_lsblk_info(mountpoint)\n\t\texcept SysCallError as ex:\n\t\t\t# this could happen if before partitioning the device contained 3 partitions\n\t\t\t# and after partitioning only 2 partitions were created, then the modifications object\n\t\t\t# will have a reference to /dev/sX3 which is being tried to umount here now\n\t\t\tif 'not a block device' in ex.message:\n\t\t\t\treturn\n\t\t\traise ex\n\n\t\tif len(lsblk_info.mountpoints) > 0:\n\t\t\tdebug(f'Partition {mountpoint} is currently mounted at: {[str(m) for m in lsblk_info.mountpoints]}')\n\n\t\t\tfor mountpoint in lsblk_info.mountpoints:\n\t\t\t\tdebug(f'Unmounting mountpoint: {mountpoint}')\n\n\t\t\t\tcommand = 'umount'\n\n\t\t\t\tif recursive:\n\t\t\t\t\tcommand += ' -R'\n\n\t\t\t\tSysCommand(f'{command} {mountpoint}')\n\n\tdef detect_pre_mounted_mods(self, base_mountpoint: Path) -> List[DeviceModification]:\n\t\tpart_mods: Dict[Path, List[PartitionModification]] = {}\n\n\t\tfor device in self.devices:\n\t\t\tfor part_info in device.partition_infos:\n\t\t\t\tfor mountpoint in part_info.mountpoints:\n\t\t\t\t\tif is_subpath(mountpoint, base_mountpoint):\n\t\t\t\t\t\tpath = Path(part_info.disk.device.path)\n\t\t\t\t\t\tpart_mods.setdefault(path, [])\n\t\t\t\t\t\tpart_mods[path].append(PartitionModification.from_existing_partition(part_info))\n\t\t\t\t\t\tbreak\n\n\t\tdevice_mods: List[DeviceModification] = []\n\t\tfor device_path, mods in part_mods.items():\n\t\t\tdevice_mod = DeviceModification(self._devices[device_path], False, mods)\n\t\t\tdevice_mods.append(device_mod)\n\n\t\treturn device_mods\n\n\tdef partprobe(self, path: Optional[Path] = None):\n\t\tif path is not None:\n\t\t\tcommand = f'partprobe {path}'\n\t\telse:\n\t\t\tcommand = 'partprobe'\n\n\t\ttry:\n\t\t\tdebug(f'Calling partprobe: {command}')\n\t\t\tSysCommand(command)\n\t\texcept SysCallError as err:\n\t\t\terror(f'\"{command}\" failed to run: {err}')\n\n\tdef _wipe(self, dev_path: Path):\n\t\t\"\"\"\n\t\tWipe a device (partition or otherwise) of meta-data, be it file system, LVM, etc.\n\t\t@param dev_path: Device path of the partition to be wiped.\n\t\t@type dev_path: str\n\t\t\"\"\"\n\t\twith open(dev_path, 'wb') as p:\n\t\t\tp.write(bytearray(1024))\n\n\tdef wipe_dev(self, block_device: BDevice):\n\t\t\"\"\"\n\t\tWipe the block device of meta-data, be it file system, LVM, etc.\n\t\tThis is not intended to be secure, but rather to ensure that\n\t\tauto-discovery tools don't recognize anything here.\n\t\t\"\"\"\n\t\tinfo(f'Wiping partitions and metadata: {block_device.device_info.path}')\n\t\tfor partition in block_device.partition_infos:\n\t\t\tself._wipe(partition.path)\n\n\t\tself._wipe(block_device.device_info.path)\n\n\ndevice_handler = DeviceHandler()\n\n\ndef disk_layouts() -> str:\n\ttry:\n\t\tlsblk_info = get_all_lsblk_info()\n\t\treturn json.dumps(lsblk_info, indent=4, sort_keys=True, cls=JSON)\n\texcept SysCallError as err:\n\t\twarn(f\"Could not return disk layouts: {err}\")\n\t\treturn ''\n\texcept json.decoder.JSONDecodeError as err:\n\t\twarn(f\"Could not return disk layouts: {err}\")\n\t\treturn ''\n",
"path": "archinstall/lib/disk/device_handler.py"
}
] | [
{
"content": "from __future__ import annotations\n\nimport json\nimport os\nimport time\nfrom pathlib import Path\nfrom typing import List, Dict, Any, Optional, TYPE_CHECKING\n\nfrom parted import ( # type: ignore\n\tDisk, Geometry, FileSystem,\n\tPartitionException, DiskLabelException,\n\tgetAllDevices, freshDisk, Partition, Device\n)\n\nfrom .device_model import (\n\tDeviceModification, PartitionModification,\n\tBDevice, _DeviceInfo, _PartitionInfo,\n\tFilesystemType, Unit, PartitionTable,\n\tModificationStatus, get_lsblk_info, LsblkInfo,\n\t_BtrfsSubvolumeInfo, get_all_lsblk_info, DiskEncryption\n)\n\nfrom ..exceptions import DiskError, UnknownFilesystemFormat\nfrom ..general import SysCommand, SysCallError, JSON\nfrom ..luks import Luks2\nfrom ..output import debug, error, info, warn\nfrom ..utils.util import is_subpath\n\nif TYPE_CHECKING:\n\t_: Any\n\n\nclass DeviceHandler(object):\n\t_TMP_BTRFS_MOUNT = Path('/mnt/arch_btrfs')\n\n\tdef __init__(self):\n\t\tself._devices: Dict[Path, BDevice] = {}\n\t\tself.load_devices()\n\n\t@property\n\tdef devices(self) -> List[BDevice]:\n\t\treturn list(self._devices.values())\n\n\tdef load_devices(self):\n\t\tblock_devices = {}\n\n\t\tfor device in getAllDevices():\n\t\t\tif get_lsblk_info(device.path).type == 'rom':\n\t\t\t\tcontinue\n\n\t\t\ttry:\n\t\t\t\tdisk = Disk(device)\n\t\t\texcept DiskLabelException as err:\n\t\t\t\tif 'unrecognised disk label' in getattr(error, 'message', str(err)):\n\t\t\t\t\tdisk = freshDisk(device, PartitionTable.GPT.value)\n\t\t\t\telse:\n\t\t\t\t\tdebug(f'Unable to get disk from device: {device}')\n\t\t\t\t\tcontinue\n\n\t\t\tdevice_info = _DeviceInfo.from_disk(disk)\n\t\t\tpartition_infos = []\n\n\t\t\tfor partition in disk.partitions:\n\t\t\t\tlsblk_info = get_lsblk_info(partition.path)\n\t\t\t\tfs_type = self._determine_fs_type(partition, lsblk_info)\n\t\t\t\tsubvol_infos = []\n\n\t\t\t\tif fs_type == FilesystemType.Btrfs:\n\t\t\t\t\tsubvol_infos = self.get_btrfs_info(partition.path)\n\n\t\t\t\tpartition_infos.append(\n\t\t\t\t\t_PartitionInfo.from_partition(\n\t\t\t\t\t\tpartition,\n\t\t\t\t\t\tfs_type,\n\t\t\t\t\t\tlsblk_info.partuuid,\n\t\t\t\t\t\tlsblk_info.mountpoints,\n\t\t\t\t\t\tsubvol_infos\n\t\t\t\t\t)\n\t\t\t\t)\n\n\t\t\tblock_device = BDevice(disk, device_info, partition_infos)\n\t\t\tblock_devices[block_device.device_info.path] = block_device\n\n\t\tself._devices = block_devices\n\n\tdef _determine_fs_type(\n\t\tself,\n\t\tpartition: Partition,\n\t\tlsblk_info: Optional[LsblkInfo] = None\n\t) -> Optional[FilesystemType]:\n\t\ttry:\n\t\t\tif partition.fileSystem:\n\t\t\t\treturn FilesystemType(partition.fileSystem.type)\n\t\t\telif lsblk_info is not None:\n\t\t\t\treturn FilesystemType(lsblk_info.fstype) if lsblk_info.fstype else None\n\t\t\treturn None\n\t\texcept ValueError:\n\t\t\tdebug(f'Could not determine the filesystem: {partition.fileSystem}')\n\n\t\treturn None\n\n\tdef get_device(self, path: Path) -> Optional[BDevice]:\n\t\treturn self._devices.get(path, None)\n\n\tdef get_device_by_partition_path(self, partition_path: Path) -> Optional[BDevice]:\n\t\tpartition = self.find_partition(partition_path)\n\t\tif partition:\n\t\t\tdevice: Device = partition.disk.device\n\t\t\treturn self.get_device(Path(device.path))\n\t\treturn None\n\n\tdef find_partition(self, path: Path) -> Optional[_PartitionInfo]:\n\t\tfor device in self._devices.values():\n\t\t\tpart = next(filter(lambda x: str(x.path) == str(path), device.partition_infos), None)\n\t\t\tif part is not None:\n\t\t\t\treturn part\n\t\treturn 
None\n\n\tdef get_uuid_for_path(self, path: Path) -> Optional[str]:\n\t\tpartition = self.find_partition(path)\n\t\treturn partition.partuuid if partition else None\n\n\tdef get_btrfs_info(self, dev_path: Path) -> List[_BtrfsSubvolumeInfo]:\n\t\tlsblk_info = get_lsblk_info(dev_path)\n\t\tsubvol_infos: List[_BtrfsSubvolumeInfo] = []\n\n\t\tif not lsblk_info.mountpoint:\n\t\t\tself.mount(dev_path, self._TMP_BTRFS_MOUNT, create_target_mountpoint=True)\n\t\t\tmountpoint = self._TMP_BTRFS_MOUNT\n\t\telse:\n\t\t\t# when multiple subvolumes are mounted then the lsblk output may look like\n\t\t\t# \"mountpoint\": \"/mnt/archinstall/.snapshots\"\n\t\t\t# \"mountpoints\": [\"/mnt/archinstall/.snapshots\", \"/mnt/archinstall/home\", ..]\n\t\t\t# so we'll determine the minimum common path and assume that's the root\n\t\t\tpath_strings = [str(m) for m in lsblk_info.mountpoints]\n\t\t\tcommon_prefix = os.path.commonprefix(path_strings)\n\t\t\tmountpoint = Path(common_prefix)\n\n\t\ttry:\n\t\t\tresult = SysCommand(f'btrfs subvolume list {mountpoint}')\n\t\texcept SysCallError as err:\n\t\t\tdebug(f'Failed to read btrfs subvolume information: {err}')\n\t\t\treturn subvol_infos\n\n\t\ttry:\n\t\t\tif decoded := result.decode('utf-8'):\n\t\t\t\t# ID 256 gen 16 top level 5 path @\n\t\t\t\tfor line in decoded.splitlines():\n\t\t\t\t\t# expected output format:\n\t\t\t\t\t# ID 257 gen 8 top level 5 path @home\n\t\t\t\t\tname = Path(line.split(' ')[-1])\n\t\t\t\t\tsub_vol_mountpoint = lsblk_info.btrfs_subvol_info.get(name, None)\n\t\t\t\t\tsubvol_infos.append(_BtrfsSubvolumeInfo(name, sub_vol_mountpoint))\n\t\texcept json.decoder.JSONDecodeError as err:\n\t\t\terror(f\"Could not decode lsblk JSON: {result}\")\n\t\t\traise err\n\n\t\tif not lsblk_info.mountpoint:\n\t\t\tself.umount(dev_path)\n\n\t\treturn subvol_infos\n\n\tdef _perform_formatting(\n\t\tself,\n\t\tfs_type: FilesystemType,\n\t\tpath: Path,\n\t\tadditional_parted_options: List[str] = []\n\t):\n\t\toptions = []\n\t\tcommand = ''\n\n\t\tmatch fs_type:\n\t\t\tcase FilesystemType.Btrfs:\n\t\t\t\toptions += ['-f']\n\t\t\t\tcommand += 'mkfs.btrfs'\n\t\t\tcase FilesystemType.Fat16:\n\t\t\t\toptions += ['-F16']\n\t\t\t\tcommand += 'mkfs.fat'\n\t\t\tcase FilesystemType.Fat32:\n\t\t\t\toptions += ['-F32']\n\t\t\t\tcommand += 'mkfs.fat'\n\t\t\tcase FilesystemType.Ext2:\n\t\t\t\toptions += ['-F']\n\t\t\t\tcommand += 'mkfs.ext2'\n\t\t\tcase FilesystemType.Ext3:\n\t\t\t\toptions += ['-F']\n\t\t\t\tcommand += 'mkfs.ext3'\n\t\t\tcase FilesystemType.Ext4:\n\t\t\t\toptions += ['-F']\n\t\t\t\tcommand += 'mkfs.ext4'\n\t\t\tcase FilesystemType.Xfs:\n\t\t\t\toptions += ['-f']\n\t\t\t\tcommand += 'mkfs.xfs'\n\t\t\tcase FilesystemType.F2fs:\n\t\t\t\toptions += ['-f']\n\t\t\t\tcommand += 'mkfs.f2fs'\n\t\t\tcase FilesystemType.Ntfs:\n\t\t\t\toptions += ['-f', '-Q']\n\t\t\t\tcommand += 'mkfs.ntfs'\n\t\t\tcase FilesystemType.Reiserfs:\n\t\t\t\tcommand += 'mkfs.reiserfs'\n\t\t\tcase _:\n\t\t\t\traise UnknownFilesystemFormat(f'Filetype \"{fs_type.value}\" is not supported')\n\n\t\toptions += additional_parted_options\n\t\toptions_str = ' '.join(options)\n\n\t\tinfo(f'Formatting filesystem: /usr/bin/{command} {options_str} {path}')\n\n\t\ttry:\n\t\t\tSysCommand(f\"/usr/bin/{command} {options_str} {path}\")\n\t\texcept SysCallError as err:\n\t\t\tmsg = f'Could not format {path} with {fs_type.value}: {err.message}'\n\t\t\terror(msg)\n\t\t\traise DiskError(msg) from err\n\n\tdef _perform_enc_formatting(\n\t\tself,\n\t\tdev_path: Path,\n\t\tmapper_name: Optional[str],\n\t\tfs_type: 
FilesystemType,\n\t\tenc_conf: DiskEncryption\n\t):\n\t\tluks_handler = Luks2(\n\t\t\tdev_path,\n\t\t\tmapper_name=mapper_name,\n\t\t\tpassword=enc_conf.encryption_password\n\t\t)\n\n\t\tkey_file = luks_handler.encrypt()\n\n\t\tdebug(f'Unlocking luks2 device: {dev_path}')\n\t\tluks_handler.unlock(key_file=key_file)\n\n\t\tif not luks_handler.mapper_dev:\n\t\t\traise DiskError('Failed to unlock luks device')\n\n\t\tinfo(f'luks2 formatting mapper dev: {luks_handler.mapper_dev}')\n\t\tself._perform_formatting(fs_type, luks_handler.mapper_dev)\n\n\t\tinfo(f'luks2 locking device: {dev_path}')\n\t\tluks_handler.lock()\n\n\tdef _validate(self, device_mod: DeviceModification):\n\t\tchecks = {\n\t\t\t# verify that all partitions have a path set (which implies that they have been created)\n\t\t\tlambda x: x.dev_path is None: ValueError('When formatting, all partitions must have a path set'),\n\t\t\t# crypto luks is not a valid file system type\n\t\t\tlambda x: x.fs_type is FilesystemType.Crypto_luks: ValueError('Crypto luks cannot be set as a filesystem type'),\n\t\t\t# file system type must be set\n\t\t\tlambda x: x.fs_type is None: ValueError('File system type must be set for modification')\n\t\t}\n\n\t\tfor check, exc in checks.items():\n\t\t\tfound = next(filter(check, device_mod.partitions), None)\n\t\t\tif found is not None:\n\t\t\t\traise exc\n\n\tdef format(\n\t\tself,\n\t\tdevice_mod: DeviceModification,\n\t\tenc_conf: Optional['DiskEncryption'] = None\n\t):\n\t\t\"\"\"\n\t\tFormat can be given an overriding path, for instance /dev/null to test\n\t\tthe formatting functionality and in essence the support for the given filesystem.\n\t\t\"\"\"\n\t\tself._validate(device_mod)\n\n\t\t# make sure all devices are unmounted\n\t\tself._umount_all_existing(device_mod)\n\n\t\tfor part_mod in device_mod.partitions:\n\t\t\t# partition will be encrypted\n\t\t\tif enc_conf is not None and part_mod in enc_conf.partitions:\n\t\t\t\tself._perform_enc_formatting(\n\t\t\t\t\tpart_mod.safe_dev_path,\n\t\t\t\t\tpart_mod.mapper_name,\n\t\t\t\t\tpart_mod.safe_fs_type,\n\t\t\t\t\tenc_conf\n\t\t\t\t)\n\t\t\telse:\n\t\t\t\tself._perform_formatting(part_mod.safe_fs_type, part_mod.safe_dev_path)\n\n\tdef _perform_partitioning(\n\t\tself,\n\t\tpart_mod: PartitionModification,\n\t\tblock_device: BDevice,\n\t\tdisk: Disk,\n\t\trequires_delete: bool\n\t):\n\t\t# when we require a delete and the partition to be (re)created\n\t\t# already exists then we have to delete it first\n\t\tif requires_delete and part_mod.status in [ModificationStatus.Modify, ModificationStatus.Delete]:\n\t\t\tinfo(f'Delete existing partition: {part_mod.safe_dev_path}')\n\t\t\tpart_info = self.find_partition(part_mod.safe_dev_path)\n\n\t\t\tif not part_info:\n\t\t\t\traise DiskError(f'No partition for dev path found: {part_mod.safe_dev_path}')\n\n\t\t\tdisk.deletePartition(part_info.partition)\n\t\t\tdisk.commit()\n\n\t\tif part_mod.status == ModificationStatus.Delete:\n\t\t\treturn\n\n\t\tstart_sector = part_mod.start.convert(\n\t\t\tUnit.sectors,\n\t\t\tblock_device.device_info.sector_size\n\t\t)\n\n\t\tlength_sector = part_mod.length.convert(\n\t\t\tUnit.sectors,\n\t\t\tblock_device.device_info.sector_size\n\t\t)\n\n\t\tgeometry = Geometry(\n\t\t\tdevice=block_device.disk.device,\n\t\t\tstart=start_sector.value,\n\t\t\tlength=length_sector.value\n\t\t)\n\n\t\tfilesystem = FileSystem(type=part_mod.safe_fs_type.value, geometry=geometry)\n\n\t\tpartition = 
Partition(\n\t\t\tdisk=disk,\n\t\t\ttype=part_mod.type.get_partition_code(),\n\t\t\tfs=filesystem,\n\t\t\tgeometry=geometry\n\t\t)\n\n\t\tfor flag in part_mod.flags:\n\t\t\tpartition.setFlag(flag.value)\n\n\t\tdebug(f'\\tType: {part_mod.type.value}')\n\t\tdebug(f'\\tFilesystem: {part_mod.safe_fs_type.value}')\n\t\tdebug(f'\\tGeometry: {start_sector.value} start sector, {length_sector.value} length')\n\n\t\ttry:\n\t\t\tdisk.addPartition(partition=partition, constraint=disk.device.optimalAlignedConstraint)\n\t\t\tdisk.commit()\n\n\t\t\t# the creation will take a bit of time\n\t\t\ttime.sleep(3)\n\n\t\t\t# the partition has a real path now as it was created\n\t\t\tpart_mod.dev_path = Path(partition.path)\n\n\t\t\tlsblk_info = self._fetch_partuuid(part_mod.dev_path)\n\n\t\t\tpart_mod.partuuid = lsblk_info.partuuid\n\t\t\tpart_mod.uuid = lsblk_info.uuid\n\t\texcept PartitionException as ex:\n\t\t\traise DiskError(f'Unable to add partition, most likely due to overlapping sectors: {ex}') from ex\n\n\tdef _fetch_partuuid(self, path: Path) -> LsblkInfo:\n\t\tattempts = 3\n\t\tlsblk_info: Optional[LsblkInfo] = None\n\n\t\tself.partprobe(path)\n\t\tfor attempt_nr in range(attempts):\n\t\t\ttime.sleep(attempt_nr + 1)\n\t\t\tlsblk_info = get_lsblk_info(path)\n\n\t\t\tif lsblk_info.partuuid:\n\t\t\t\tbreak\n\n\t\t\tself.partprobe(path)\n\n\t\tif not lsblk_info or not lsblk_info.partuuid:\n\t\t\tdebug(f'Unable to determine new partition uuid: {path}\\n{lsblk_info}')\n\t\t\traise DiskError(f'Unable to determine new partition uuid: {path}')\n\n\t\tdebug(f'partuuid found: {lsblk_info.json()}')\n\n\t\treturn lsblk_info\n\n\tdef create_btrfs_volumes(\n\t\tself,\n\t\tpart_mod: PartitionModification,\n\t\tenc_conf: Optional['DiskEncryption'] = None\n\t):\n\t\tinfo(f'Creating subvolumes: {part_mod.safe_dev_path}')\n\n\t\tluks_handler = None\n\n\t\t# unlock the partition first if it's encrypted\n\t\tif enc_conf is not None and part_mod in enc_conf.partitions:\n\t\t\tif not part_mod.mapper_name:\n\t\t\t\traise ValueError('No device path specified for modification')\n\n\t\t\tluks_handler = self.unlock_luks2_dev(\n\t\t\t\tpart_mod.safe_dev_path,\n\t\t\t\tpart_mod.mapper_name,\n\t\t\t\tenc_conf.encryption_password\n\t\t\t)\n\n\t\t\tif not luks_handler.mapper_dev:\n\t\t\t\traise DiskError('Failed to unlock luks device')\n\n\t\t\tself.mount(luks_handler.mapper_dev, self._TMP_BTRFS_MOUNT, create_target_mountpoint=True)\n\t\telse:\n\t\t\tself.mount(part_mod.safe_dev_path, self._TMP_BTRFS_MOUNT, create_target_mountpoint=True)\n\n\t\tfor sub_vol in part_mod.btrfs_subvols:\n\t\t\tdebug(f'Creating subvolume: {sub_vol.name}')\n\n\t\t\tif luks_handler is not None:\n\t\t\t\tsubvol_path = self._TMP_BTRFS_MOUNT / sub_vol.name\n\t\t\telse:\n\t\t\t\tsubvol_path = self._TMP_BTRFS_MOUNT / sub_vol.name\n\n\t\t\tSysCommand(f\"btrfs subvolume create {subvol_path}\")\n\n\t\t\tif sub_vol.nodatacow:\n\t\t\t\ttry:\n\t\t\t\t\tSysCommand(f'chattr +C {subvol_path}')\n\t\t\t\texcept SysCallError as err:\n\t\t\t\t\traise DiskError(f'Could not set nodatacow attribute at {subvol_path}: {err}')\n\n\t\t\tif sub_vol.compress:\n\t\t\t\ttry:\n\t\t\t\t\tSysCommand(f'chattr +c {subvol_path}')\n\t\t\t\texcept SysCallError as err:\n\t\t\t\t\traise DiskError(f'Could not set compress attribute at {subvol_path}: {err}')\n\n\t\tif luks_handler is not None and luks_handler.mapper_dev is not None:\n\t\t\tself.umount(luks_handler.mapper_dev)\n\t\t\tluks_handler.lock()\n\t\telse:\n\t\t\tself.umount(part_mod.safe_dev_path)\n\n\tdef unlock_luks2_dev(self, dev_path: 
Path, mapper_name: str, enc_password: str) -> Luks2:\n\t\tluks_handler = Luks2(dev_path, mapper_name=mapper_name, password=enc_password)\n\n\t\tif not luks_handler.is_unlocked():\n\t\t\tluks_handler.unlock()\n\n\t\tif not luks_handler.is_unlocked():\n\t\t\traise DiskError(f'Failed to unlock luks2 device: {dev_path}')\n\n\t\treturn luks_handler\n\n\tdef _umount_all_existing(self, modification: DeviceModification):\n\t\tinfo(f'Unmounting all partitions: {modification.device_path}')\n\n\t\texisting_partitions = self._devices[modification.device_path].partition_infos\n\n\t\tfor partition in existing_partitions:\n\t\t\tdebug(f'Unmounting: {partition.path}')\n\n\t\t\t# un-mount for existing encrypted partitions\n\t\t\tif partition.fs_type == FilesystemType.Crypto_luks:\n\t\t\t\tLuks2(partition.path).lock()\n\t\t\telse:\n\t\t\t\tself.umount(partition.path, recursive=True)\n\n\tdef partition(\n\t\tself,\n\t\tmodification: DeviceModification,\n\t\tpartition_table: Optional[PartitionTable] = None\n\t):\n\t\t\"\"\"\n\t\tCreate a partition table on the block device and create all partitions.\n\t\t\"\"\"\n\t\tif modification.wipe:\n\t\t\tif partition_table is None:\n\t\t\t\traise ValueError('Modification is marked as wipe but no partitioning table was provided')\n\n\t\t\tif partition_table.MBR and len(modification.partitions) > 3:\n\t\t\t\traise DiskError('Too many partitions on disk, MBR disks can only have 3 primary partitions')\n\n\t\t# make sure all devices are unmounted\n\t\tself._umount_all_existing(modification)\n\n\t\t# WARNING: the entire device will be wiped and all data lost\n\t\tif modification.wipe:\n\t\t\tself.wipe_dev(modification.device)\n\t\t\tpart_table = partition_table.value if partition_table else None\n\t\t\tdisk = freshDisk(modification.device.disk.device, part_table)\n\t\telse:\n\t\t\tinfo(f'Use existing device: {modification.device_path}')\n\t\t\tdisk = modification.device.disk\n\n\t\tinfo(f'Creating partitions: {modification.device_path}')\n\n\t\t# TODO sort by delete first\n\n\t\tfor part_mod in modification.partitions:\n\t\t\t# don't touch existing partitions\n\t\t\tif part_mod.exists():\n\t\t\t\tcontinue\n\n\t\t\t# if the entire disk got nuked then we don't have to delete\n\t\t\t# any existing partitions anymore because they're all gone already\n\t\t\trequires_delete = modification.wipe is False\n\t\t\tself._perform_partitioning(part_mod, modification.device, disk, requires_delete=requires_delete)\n\n\t\tself.partprobe(modification.device.device_info.path)\n\n\tdef mount(\n\t\tself,\n\t\tdev_path: Path,\n\t\ttarget_mountpoint: Path,\n\t\tmount_fs: Optional[str] = None,\n\t\tcreate_target_mountpoint: bool = True,\n\t\toptions: List[str] = []\n\t):\n\t\tif create_target_mountpoint and not target_mountpoint.exists():\n\t\t\ttarget_mountpoint.mkdir(parents=True, exist_ok=True)\n\n\t\tif not target_mountpoint.exists():\n\t\t\traise ValueError('Target mountpoint does not exist')\n\n\t\tlsblk_info = get_lsblk_info(dev_path)\n\t\tif target_mountpoint in lsblk_info.mountpoints:\n\t\t\tinfo(f'Device already mounted at {target_mountpoint}')\n\t\t\treturn\n\n\t\tstr_options = ','.join(options)\n\t\tstr_options = f'-o {str_options}' if str_options else ''\n\n\t\tmount_fs = f'-t {mount_fs}' if mount_fs else ''\n\n\t\tcommand = f'mount {mount_fs} {str_options} {dev_path} {target_mountpoint}'\n\n\t\tdebug(f'Mounting {dev_path}: command')\n\n\t\ttry:\n\t\t\tSysCommand(command)\n\t\texcept SysCallError as err:\n\t\t\traise DiskError(f'Could not mount {dev_path}: 
{command}\\n{err.message}')\n\n\tdef umount(self, mountpoint: Path, recursive: bool = False):\n\t\ttry:\n\t\t\tlsblk_info = get_lsblk_info(mountpoint)\n\t\texcept SysCallError as ex:\n\t\t\t# this could happen if before partitioning the device contained 3 partitions\n\t\t\t# and after partitioning only 2 partitions were created, then the modifications object\n\t\t\t# will have a reference to /dev/sX3 which is being tried to umount here now\n\t\t\tif 'not a block device' in ex.message:\n\t\t\t\treturn\n\t\t\traise ex\n\n\t\tif len(lsblk_info.mountpoints) > 0:\n\t\t\tdebug(f'Partition {mountpoint} is currently mounted at: {[str(m) for m in lsblk_info.mountpoints]}')\n\n\t\t\tfor mountpoint in lsblk_info.mountpoints:\n\t\t\t\tdebug(f'Unmounting mountpoint: {mountpoint}')\n\n\t\t\t\tcommand = 'umount'\n\n\t\t\t\tif recursive:\n\t\t\t\t\tcommand += ' -R'\n\n\t\t\t\tSysCommand(f'{command} {mountpoint}')\n\n\tdef detect_pre_mounted_mods(self, base_mountpoint: Path) -> List[DeviceModification]:\n\t\tpart_mods: Dict[Path, List[PartitionModification]] = {}\n\n\t\tfor device in self.devices:\n\t\t\tfor part_info in device.partition_infos:\n\t\t\t\tfor mountpoint in part_info.mountpoints:\n\t\t\t\t\tif is_subpath(mountpoint, base_mountpoint):\n\t\t\t\t\t\tpath = Path(part_info.disk.device.path)\n\t\t\t\t\t\tpart_mods.setdefault(path, [])\n\t\t\t\t\t\tpart_mods[path].append(PartitionModification.from_existing_partition(part_info))\n\t\t\t\t\t\tbreak\n\n\t\tdevice_mods: List[DeviceModification] = []\n\t\tfor device_path, mods in part_mods.items():\n\t\t\tdevice_mod = DeviceModification(self._devices[device_path], False, mods)\n\t\t\tdevice_mods.append(device_mod)\n\n\t\treturn device_mods\n\n\tdef partprobe(self, path: Optional[Path] = None):\n\t\tif path is not None:\n\t\t\tcommand = f'partprobe {path}'\n\t\telse:\n\t\t\tcommand = 'partprobe'\n\n\t\ttry:\n\t\t\tdebug(f'Calling partprobe: {command}')\n\t\t\tSysCommand(command)\n\t\texcept SysCallError as err:\n\t\t\terror(f'\"{command}\" failed to run: {err}')\n\n\tdef _wipe(self, dev_path: Path):\n\t\t\"\"\"\n\t\tWipe a device (partition or otherwise) of meta-data, be it file system, LVM, etc.\n\t\t@param dev_path: Device path of the partition to be wiped.\n\t\t@type dev_path: str\n\t\t\"\"\"\n\t\twith open(dev_path, 'wb') as p:\n\t\t\tp.write(bytearray(1024))\n\n\tdef wipe_dev(self, block_device: BDevice):\n\t\t\"\"\"\n\t\tWipe the block device of meta-data, be it file system, LVM, etc.\n\t\tThis is not intended to be secure, but rather to ensure that\n\t\tauto-discovery tools don't recognize anything here.\n\t\t\"\"\"\n\t\tinfo(f'Wiping partitions and metadata: {block_device.device_info.path}')\n\t\tfor partition in block_device.partition_infos:\n\t\t\tself._wipe(partition.path)\n\n\t\tself._wipe(block_device.device_info.path)\n\n\ndevice_handler = DeviceHandler()\n\n\ndef disk_layouts() -> str:\n\ttry:\n\t\tlsblk_info = get_all_lsblk_info()\n\t\treturn json.dumps(lsblk_info, indent=4, sort_keys=True, cls=JSON)\n\texcept SysCallError as err:\n\t\twarn(f\"Could not return disk layouts: {err}\")\n\t\treturn ''\n\texcept json.decoder.JSONDecodeError as err:\n\t\twarn(f\"Could not return disk layouts: {err}\")\n\t\treturn ''\n",
"path": "archinstall/lib/disk/device_handler.py"
}
] | diff --git a/archinstall/lib/disk/device_handler.py b/archinstall/lib/disk/device_handler.py
index 2c88e382cb..9acf09990a 100644
--- a/archinstall/lib/disk/device_handler.py
+++ b/archinstall/lib/disk/device_handler.py
@@ -45,6 +45,9 @@ def load_devices(self):
block_devices = {}
for device in getAllDevices():
+ if get_lsblk_info(device.path).type == 'rom':
+ continue
+
try:
disk = Disk(device)
except DiskLabelException as err:
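The hunk above filters out read-only optical ('rom') devices during enumeration, before parted Disk objects are built. A minimal standalone sketch of the same guard, assuming a `get_lsblk_info` helper whose result exposes lsblk's TYPE column as a `type` attribute (as in the archinstall code above):

```python
from typing import Any, Callable, Iterable, List


def usable_block_devices(
    devices: Iterable[Any],
    get_lsblk_info: Callable[[str], Any],
) -> List[Any]:
    """Return only the devices that can actually be partitioned.

    lsblk reports CD/DVD drives with TYPE == 'rom'; building a parted
    Disk for them is pointless, so they are skipped up front.
    """
    usable = []
    for device in devices:
        if get_lsblk_info(device.path).type == "rom":
            continue  # read-only media, cannot hold a partition table
        usable.append(device)
    return usable
```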
|
ibis-project__ibis-5647 | bug(postgres): cannot connect to postgres table with `tsvector` column
### What happened?
Reposting from https://stackoverflow.com/questions/74520302/why-cant-i-connect-ibis-to-a-postgres-table-with-a-tsvector-column
Implementing whatever postgres functionality exists around tsvector may not be something we want to do, but I think we should at least allow connecting to the table.
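The traceback below shows the root cause: the multipledispatch-based `dt.dtype` converter has no handler registered for the `(PGDialect, TSVECTOR)` pair. A minimal sketch of the missing registration, following the same pattern the postgres backend already uses for types like `UUID` and `HSTORE` in `ibis/backends/postgres/datatypes.py` (mapping `tsvector` to a plain ibis string, since ibis has no dedicated tsvector type):

```python
import ibis.expr.datatypes as dt
from sqlalchemy.dialects import postgresql
from sqlalchemy.dialects.postgresql.base import PGDialect


@dt.dtype.register(PGDialect, postgresql.TSVECTOR)
def sa_postgres_tsvector(_, satype, nullable=True):
    # ibis has no first-class tsvector type; surfacing it as a string is
    # enough to let `con.table(...)` reflect the schema without raising.
    return dt.String(nullable=nullable)
```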
### What version of ibis are you using?
3.2.0
### What backend(s) are you using, if any?
postgres
### Relevant log output
```sh
KeyError Traceback (most recent call last)
File ~/anaconda3/envs/ec_extract/lib/python3.9/site-packages/multipledispatch/dispatcher.py:269, in Dispatcher.__call__(self, *args, **kwargs)
268 try:
--> 269 func = self._cache[types]
270 except KeyError:
KeyError: (<class 'sqlalchemy.dialects.postgresql.psycopg2.PGDialect_psycopg2'>, <class 'sqlalchemy.dialects.postgresql.base.TSVECTOR'>)
During handling of the above exception, another exception occurred:
NotImplementedError Traceback (most recent call last)
***
----> 29 main_table = con.table(table_name)[columns['column_list']]
File ~/anaconda3/envs/ec_extract/lib/python3.9/site-packages/ibis/backends/base/sql/alchemy/__init__.py:438, in BaseAlchemyBackend.table(self, name, database, schema)
428 return self.database(database=database).table(
429 name=name,
430 database=database,
431 schema=schema,
432 )
433 sqla_table = self._get_sqla_table(
434 name,
...
275 (self.name, str_signature(types)))
276 self._cache[types] = func
277 try:
NotImplementedError: Could not find signature for dtype: <PGDialect_psycopg2, TSVECTOR>
```
### Code of Conduct
- [X] I agree to follow this project's Code of Conduct
| [
{
"content": "from __future__ import annotations\n\nimport parsy\nimport sqlalchemy as sa\nimport toolz\nfrom sqlalchemy.dialects import postgresql\nfrom sqlalchemy.dialects.postgresql.base import PGDialect\n\nimport ibis.expr.datatypes as dt\nfrom ibis.backends.base.sql.alchemy import to_sqla_type\nfrom ibis.common.parsing import (\n COMMA,\n LBRACKET,\n LPAREN,\n PRECISION,\n RBRACKET,\n RPAREN,\n SCALE,\n spaceless,\n spaceless_string,\n)\n\n_BRACKETS = \"[]\"\n\n\ndef _parse_numeric(\n text: str, default_decimal_parameters: tuple[int | None, int | None] = (None, None)\n) -> dt.DataType:\n decimal = spaceless_string(\"decimal\", \"numeric\").then(\n parsy.seq(LPAREN.then(PRECISION.skip(COMMA)), SCALE.skip(RPAREN))\n .optional(default_decimal_parameters)\n .combine(dt.Decimal)\n )\n\n brackets = spaceless(LBRACKET).then(spaceless(RBRACKET))\n\n pg_array = parsy.seq(decimal, brackets.at_least(1).map(len)).combine(\n lambda value_type, n: toolz.nth(n, toolz.iterate(dt.Array, value_type))\n )\n\n ty = pg_array | decimal\n return ty.parse(text)\n\n\ndef _get_type(typestr: str) -> dt.DataType:\n is_array = typestr.endswith(_BRACKETS)\n if (typ := _type_mapping.get(typestr.replace(_BRACKETS, \"\"))) is not None:\n return dt.Array(typ) if is_array else typ\n return _parse_numeric(typestr)\n\n\n_type_mapping = {\n \"bigint\": dt.int64,\n \"boolean\": dt.bool,\n \"bytea\": dt.binary,\n \"character varying\": dt.string,\n \"character\": dt.string,\n \"character(1)\": dt.string,\n \"date\": dt.date,\n \"double precision\": dt.float64,\n \"geography\": dt.geography,\n \"geometry\": dt.geometry,\n \"inet\": dt.inet,\n \"integer\": dt.int32,\n \"interval\": dt.interval,\n \"json\": dt.json,\n \"jsonb\": dt.json,\n \"line\": dt.linestring,\n \"macaddr\": dt.macaddr,\n \"macaddr8\": dt.macaddr,\n \"numeric\": dt.decimal,\n \"point\": dt.point,\n \"polygon\": dt.polygon,\n \"real\": dt.float32,\n \"smallint\": dt.int16,\n \"text\": dt.string,\n # NB: this isn't correct because we're losing the \"with time zone\"\n # information (ibis doesn't have time type that is time-zone aware), but we\n # try to do _something_ here instead of failing\n \"time with time zone\": dt.time,\n \"time without time zone\": dt.time,\n \"timestamp with time zone\": dt.Timestamp(\"UTC\"),\n \"timestamp without time zone\": dt.timestamp,\n \"uuid\": dt.uuid,\n}\n\n\n@to_sqla_type.register(PGDialect, dt.Array)\ndef _pg_array(dialect, itype):\n # Unwrap the array element type because sqlalchemy doesn't allow arrays of\n # arrays. 
This doesn't affect the underlying data.\n while itype.is_array():\n itype = itype.value_type\n return sa.ARRAY(to_sqla_type(dialect, itype))\n\n\n@to_sqla_type.register(PGDialect, dt.Map)\ndef _pg_map(dialect, itype):\n if not (itype.key_type.is_string() and itype.value_type.is_string()):\n raise TypeError(f\"PostgreSQL only supports map<string, string>, got: {itype}\")\n return postgresql.HSTORE()\n\n\[email protected](PGDialect, postgresql.DOUBLE_PRECISION)\ndef sa_double(_, satype, nullable=True):\n return dt.Float64(nullable=nullable)\n\n\[email protected](PGDialect, postgresql.UUID)\ndef sa_uuid(_, satype, nullable=True):\n return dt.UUID(nullable=nullable)\n\n\[email protected](PGDialect, postgresql.MACADDR)\ndef sa_macaddr(_, satype, nullable=True):\n return dt.MACADDR(nullable=nullable)\n\n\[email protected](PGDialect, postgresql.HSTORE)\ndef sa_hstore(_, satype, nullable=True):\n return dt.Map(dt.string, dt.string, nullable=nullable)\n\n\[email protected](PGDialect, postgresql.INET)\ndef sa_inet(_, satype, nullable=True):\n return dt.INET(nullable=nullable)\n\n\[email protected](PGDialect, postgresql.JSONB)\ndef sa_json(_, satype, nullable=True):\n return dt.JSON(nullable=nullable)\n\n\n_POSTGRES_FIELD_TO_IBIS_UNIT = {\n \"YEAR\": \"Y\",\n \"MONTH\": \"M\",\n \"DAY\": \"D\",\n \"HOUR\": \"h\",\n \"MINUTE\": \"m\",\n \"SECOND\": \"s\",\n \"YEAR TO MONTH\": \"M\",\n \"DAY TO HOUR\": \"h\",\n \"DAY TO MINUTE\": \"m\",\n \"DAY TO SECOND\": \"s\",\n \"HOUR TO MINUTE\": \"m\",\n \"HOUR TO SECOND\": \"s\",\n \"MINUTE TO SECOND\": \"s\",\n}\n\n\[email protected](PGDialect, postgresql.INTERVAL)\ndef sa_postgres_interval(_, satype, nullable=True):\n field = satype.fields.upper()\n if (unit := _POSTGRES_FIELD_TO_IBIS_UNIT.get(field, None)) is None:\n raise ValueError(f\"Unknown PostgreSQL interval field {field!r}\")\n elif unit in {\"Y\", \"M\"}:\n raise ValueError(\n \"Variable length intervals are not yet supported with PostgreSQL\"\n )\n return dt.Interval(unit=unit, nullable=nullable)\n\n\[email protected](PGDialect, sa.ARRAY)\ndef sa_pg_array(dialect, satype, nullable=True):\n dimensions = satype.dimensions\n if dimensions is not None and dimensions != 1:\n raise NotImplementedError(\n f\"Nested array types not yet supported for {dialect.name} dialect\"\n )\n\n value_dtype = dt.dtype(dialect, satype.item_type)\n return dt.Array(value_dtype, nullable=nullable)\n",
"path": "ibis/backends/postgres/datatypes.py"
}
] | [
{
"content": "from __future__ import annotations\n\nimport parsy\nimport sqlalchemy as sa\nimport toolz\nfrom sqlalchemy.dialects import postgresql\nfrom sqlalchemy.dialects.postgresql.base import PGDialect\n\nimport ibis.expr.datatypes as dt\nfrom ibis.backends.base.sql.alchemy import to_sqla_type\nfrom ibis.common.parsing import (\n COMMA,\n LBRACKET,\n LPAREN,\n PRECISION,\n RBRACKET,\n RPAREN,\n SCALE,\n spaceless,\n spaceless_string,\n)\n\n_BRACKETS = \"[]\"\n\n\ndef _parse_numeric(\n text: str, default_decimal_parameters: tuple[int | None, int | None] = (None, None)\n) -> dt.DataType:\n decimal = spaceless_string(\"decimal\", \"numeric\").then(\n parsy.seq(LPAREN.then(PRECISION.skip(COMMA)), SCALE.skip(RPAREN))\n .optional(default_decimal_parameters)\n .combine(dt.Decimal)\n )\n\n brackets = spaceless(LBRACKET).then(spaceless(RBRACKET))\n\n pg_array = parsy.seq(decimal, brackets.at_least(1).map(len)).combine(\n lambda value_type, n: toolz.nth(n, toolz.iterate(dt.Array, value_type))\n )\n\n ty = pg_array | decimal\n return ty.parse(text)\n\n\ndef _get_type(typestr: str) -> dt.DataType:\n is_array = typestr.endswith(_BRACKETS)\n if (typ := _type_mapping.get(typestr.replace(_BRACKETS, \"\"))) is not None:\n return dt.Array(typ) if is_array else typ\n return _parse_numeric(typestr)\n\n\n_type_mapping = {\n \"bigint\": dt.int64,\n \"boolean\": dt.bool,\n \"bytea\": dt.binary,\n \"character varying\": dt.string,\n \"character\": dt.string,\n \"character(1)\": dt.string,\n \"date\": dt.date,\n \"double precision\": dt.float64,\n \"geography\": dt.geography,\n \"geometry\": dt.geometry,\n \"inet\": dt.inet,\n \"integer\": dt.int32,\n \"interval\": dt.interval,\n \"json\": dt.json,\n \"jsonb\": dt.json,\n \"line\": dt.linestring,\n \"macaddr\": dt.macaddr,\n \"macaddr8\": dt.macaddr,\n \"numeric\": dt.decimal,\n \"point\": dt.point,\n \"polygon\": dt.polygon,\n \"real\": dt.float32,\n \"smallint\": dt.int16,\n \"text\": dt.string,\n # NB: this isn't correct because we're losing the \"with time zone\"\n # information (ibis doesn't have time type that is time-zone aware), but we\n # try to do _something_ here instead of failing\n \"time with time zone\": dt.time,\n \"time without time zone\": dt.time,\n \"timestamp with time zone\": dt.Timestamp(\"UTC\"),\n \"timestamp without time zone\": dt.timestamp,\n \"uuid\": dt.uuid,\n}\n\n\n@to_sqla_type.register(PGDialect, dt.Array)\ndef _pg_array(dialect, itype):\n # Unwrap the array element type because sqlalchemy doesn't allow arrays of\n # arrays. 
This doesn't affect the underlying data.\n while itype.is_array():\n itype = itype.value_type\n return sa.ARRAY(to_sqla_type(dialect, itype))\n\n\n@to_sqla_type.register(PGDialect, dt.Map)\ndef _pg_map(dialect, itype):\n if not (itype.key_type.is_string() and itype.value_type.is_string()):\n raise TypeError(f\"PostgreSQL only supports map<string, string>, got: {itype}\")\n return postgresql.HSTORE()\n\n\[email protected](PGDialect, postgresql.DOUBLE_PRECISION)\ndef sa_double(_, satype, nullable=True):\n return dt.Float64(nullable=nullable)\n\n\[email protected](PGDialect, postgresql.UUID)\ndef sa_uuid(_, satype, nullable=True):\n return dt.UUID(nullable=nullable)\n\n\[email protected](PGDialect, postgresql.MACADDR)\ndef sa_macaddr(_, satype, nullable=True):\n return dt.MACADDR(nullable=nullable)\n\n\[email protected](PGDialect, postgresql.HSTORE)\ndef sa_hstore(_, satype, nullable=True):\n return dt.Map(dt.string, dt.string, nullable=nullable)\n\n\[email protected](PGDialect, postgresql.INET)\ndef sa_inet(_, satype, nullable=True):\n return dt.INET(nullable=nullable)\n\n\[email protected](PGDialect, postgresql.JSONB)\ndef sa_json(_, satype, nullable=True):\n return dt.JSON(nullable=nullable)\n\n\n_POSTGRES_FIELD_TO_IBIS_UNIT = {\n \"YEAR\": \"Y\",\n \"MONTH\": \"M\",\n \"DAY\": \"D\",\n \"HOUR\": \"h\",\n \"MINUTE\": \"m\",\n \"SECOND\": \"s\",\n \"YEAR TO MONTH\": \"M\",\n \"DAY TO HOUR\": \"h\",\n \"DAY TO MINUTE\": \"m\",\n \"DAY TO SECOND\": \"s\",\n \"HOUR TO MINUTE\": \"m\",\n \"HOUR TO SECOND\": \"s\",\n \"MINUTE TO SECOND\": \"s\",\n}\n\n\[email protected](PGDialect, postgresql.INTERVAL)\ndef sa_postgres_interval(_, satype, nullable=True):\n field = satype.fields.upper()\n if (unit := _POSTGRES_FIELD_TO_IBIS_UNIT.get(field, None)) is None:\n raise ValueError(f\"Unknown PostgreSQL interval field {field!r}\")\n elif unit in {\"Y\", \"M\"}:\n raise ValueError(\n \"Variable length intervals are not yet supported with PostgreSQL\"\n )\n return dt.Interval(unit=unit, nullable=nullable)\n\n\[email protected](PGDialect, sa.ARRAY)\ndef sa_pg_array(dialect, satype, nullable=True):\n dimensions = satype.dimensions\n if dimensions is not None and dimensions != 1:\n raise NotImplementedError(\n f\"Nested array types not yet supported for {dialect.name} dialect\"\n )\n\n value_dtype = dt.dtype(dialect, satype.item_type)\n return dt.Array(value_dtype, nullable=nullable)\n\n\[email protected](PGDialect, postgresql.TSVECTOR)\ndef sa_postgres_tsvector(_, satype, nullable=True):\n return dt.String(nullable=nullable)\n",
"path": "ibis/backends/postgres/datatypes.py"
}
] | diff --git a/ci/schema/postgresql.sql b/ci/schema/postgresql.sql
index b6cceb785e0d..8be637c8d645 100644
--- a/ci/schema/postgresql.sql
+++ b/ci/schema/postgresql.sql
@@ -208,3 +208,9 @@ CREATE TABLE map (kv HSTORE);
INSERT INTO map VALUES
('a=>1,b=>2,c=>3'),
('d=>4,e=>5,c=>6');
+
+ALTER TABLE awards_players
+ADD search tsvector
+GENERATED always AS (
+ setweight(to_tsvector('simple', notes), 'A') :: tsvector
+) stored;
diff --git a/ibis/backends/postgres/datatypes.py b/ibis/backends/postgres/datatypes.py
index 9bcf71fe2d98..ae64e8aba937 100644
--- a/ibis/backends/postgres/datatypes.py
+++ b/ibis/backends/postgres/datatypes.py
@@ -170,3 +170,8 @@ def sa_pg_array(dialect, satype, nullable=True):
value_dtype = dt.dtype(dialect, satype.item_type)
return dt.Array(value_dtype, nullable=nullable)
+
+
[email protected](PGDialect, postgresql.TSVECTOR)
+def sa_postgres_tsvector(_, satype, nullable=True):
+ return dt.String(nullable=nullable)
diff --git a/ibis/backends/postgres/tests/test_string.py b/ibis/backends/postgres/tests/test_string.py
index 97803651a59d..b194fd682df3 100644
--- a/ibis/backends/postgres/tests/test_string.py
+++ b/ibis/backends/postgres/tests/test_string.py
@@ -4,6 +4,7 @@
from pytest import param
import ibis
+import ibis.expr.datatypes as dt
@pytest.mark.parametrize(
@@ -16,3 +17,9 @@ def test_special_strings(alltypes, data, data_type):
expr = alltypes[[alltypes.id, lit]].head(1)
df = expr.execute()
assert df['tmp'].iloc[0] == uuid.UUID(data)
+
+
+def test_load_tsvector_table(con):
+ awards_players = con.table("awards_players")
+ assert "search" in awards_players.columns
+ assert awards_players.schema()["search"] == dt.String(nullable=True)
|
ansible__ansible-lint-2926 | Collection dependencies from galaxy.yml not skipped in offline mode
# Issue Type
- Bug report
# Ansible and Ansible Lint details
```
ansible [core 2.13.3]
ansible-lint 6.11.0 using ansible 2.13.3
```
- ansible installation method: pip
- ansible-lint installation method: pip
# Desired Behavior
Offline mode should not try to download or install the collection dependencies specified in galaxy.yml, since doing so breaks CI pipelines. It should assume that the dependencies are already installed in one of the ANSIBLE_COLLECTIONS_PATH locations and skip the call to ansible-galaxy.
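One way to get this behavior is to short-circuit the ansible-galaxy invocation whenever the offline flag is set. This is a hypothetical sketch, not necessarily the project's eventual fix; the helper name and signature are made up for illustration:

```python
import logging
import subprocess

_logger = logging.getLogger(__name__)


def install_collection_deps(collection_dir: str, dest: str, offline: bool) -> None:
    """Install the collection (and its galaxy.yml dependencies) into `dest`.

    In offline mode we assume the dependencies already exist in one of the
    ANSIBLE_COLLECTIONS_PATH locations, so the network call is skipped.
    """
    if offline:
        _logger.warning(
            "Offline mode: skipping 'ansible-galaxy collection install'; "
            "assuming galaxy.yml dependencies are already installed."
        )
        return
    subprocess.run(
        ["ansible-galaxy", "collection", "install", "--force", "-p", dest, collection_dir],
        check=True,
    )
```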
# Actual Behavior (Bug report only)
1. Create new collection e.g. with ``ansible-galaxy collection init test.test``
2. Edit galaxy.yml and add a dependency (e.g. ``test.dep: '*'``)
3. Run ansible-lint in offline mode within the collection dir: ```ansible-lint --offline```
Ansible-lint calls ansible-galaxy and tries to download the collection.
```
WARNING Retrying execution failure 1 of: ansible-galaxy collection install -vvv --force -p /home/401840/.cache/ansible-compat/9f86d0/collections .
ERROR Command returned 1 code:
ansible-galaxy [core 2.13.3]
config file = /etc/ansible/ansible.cfg
configured module search path = ['/home/401840/.cache/ansible-compat/9f86d0/modules', '/home/401840/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /home/401840/.local/lib/python3.9/site-packages/ansible
ansible collection location = /home/401840/.cache/ansible-compat/9f86d0/collections:/home/401840/.ansible/collections:/usr/share/ansible/collections
executable location = /home/401840/.local/bin/ansible-galaxy
python version = 3.9.2 (default, Feb 28 2021, 17:03:44) [GCC 10.2.1 20210110]
jinja version = 3.0.1
libyaml = True
Using /etc/ansible/ansible.cfg as config file
Starting galaxy collection install process
Process install dependency map
Opened /home/401840/.ansible/galaxy_token
ERROR! Failed to resolve the requested dependencies map. Could not satisfy the following requirements:
* test.dev:* (dependency of test.test:1.0.0)
```
As the collection does not exist on galaxy.ansible.com, this will break. This will also break any CI pipeline running in a restricted / disconnected environment without access to public galaxy hub.
| [
{
"content": "\"\"\"Application.\"\"\"\nfrom __future__ import annotations\n\nimport itertools\nimport logging\nimport os\nfrom functools import lru_cache\nfrom typing import TYPE_CHECKING, Any\n\nfrom ansible_compat.runtime import Runtime\nfrom rich.markup import escape\nfrom rich.table import Table\n\nfrom ansiblelint import formatters\nfrom ansiblelint._mockings import _perform_mockings\nfrom ansiblelint.color import console, console_stderr, render_yaml\nfrom ansiblelint.config import PROFILES, get_version_warning\nfrom ansiblelint.config import options as default_options\nfrom ansiblelint.constants import RULE_DOC_URL, SUCCESS_RC, VIOLATIONS_FOUND_RC\nfrom ansiblelint.errors import MatchError\nfrom ansiblelint.stats import SummarizedResults, TagStats\n\nif TYPE_CHECKING:\n from argparse import Namespace\n from typing import Dict, Set # pylint: disable=ungrouped-imports\n\n from ansiblelint._internal.rules import BaseRule\n from ansiblelint.file_utils import Lintable\n from ansiblelint.runner import LintResult\n\n\n_logger = logging.getLogger(__package__)\n\n\nclass App:\n \"\"\"App class represents an execution of the linter.\"\"\"\n\n def __init__(self, options: Namespace):\n \"\"\"Construct app run based on already loaded configuration.\"\"\"\n options.skip_list = _sanitize_list_options(options.skip_list)\n options.warn_list = _sanitize_list_options(options.warn_list)\n\n self.options = options\n\n formatter_factory = choose_formatter_factory(options)\n self.formatter = formatter_factory(options.cwd, options.display_relative_path)\n\n self.runtime = Runtime(isolated=True)\n\n def render_matches(self, matches: list[MatchError]) -> None:\n \"\"\"Display given matches (if they are not fixed).\"\"\"\n matches = [match for match in matches if not match.fixed]\n\n if isinstance(\n self.formatter,\n (formatters.CodeclimateJSONFormatter, formatters.SarifFormatter),\n ):\n # If formatter CodeclimateJSONFormatter or SarifFormatter is chosen,\n # then print only the matches in JSON\n console.print(\n self.formatter.format_result(matches), markup=False, highlight=False\n )\n return\n\n ignored_matches = [match for match in matches if match.ignored]\n fatal_matches = [match for match in matches if not match.ignored]\n # Displayed ignored matches first\n if ignored_matches:\n _logger.warning(\n \"Listing %s violation(s) marked as ignored, likely already known\",\n len(ignored_matches),\n )\n for match in ignored_matches:\n if match.ignored:\n # highlight must be off or apostrophes may produce unexpected results\n console.print(self.formatter.format(match), highlight=False)\n if fatal_matches:\n _logger.warning(\n \"Listing %s violation(s) that are fatal\", len(fatal_matches)\n )\n for match in fatal_matches:\n if not match.ignored:\n console.print(self.formatter.format(match), highlight=False)\n\n # If run under GitHub Actions we also want to emit output recognized by it.\n if os.getenv(\"GITHUB_ACTIONS\") == \"true\" and os.getenv(\"GITHUB_WORKFLOW\"):\n formatter = formatters.AnnotationsFormatter(self.options.cwd, True)\n for match in itertools.chain(fatal_matches, ignored_matches):\n console.print(formatter.format(match), markup=False, highlight=False)\n\n # If sarif_file is set, we also dump the results to a sarif file.\n if self.options.sarif_file:\n sarif = formatters.SarifFormatter(self.options.cwd, True)\n json = sarif.format_result(matches)\n with open(self.options.sarif_file, \"w\", encoding=\"utf-8\") as sarif_file:\n sarif_file.write(json)\n\n def count_results(self, matches: 
list[MatchError]) -> SummarizedResults:\n \"\"\"Count failures and warnings in matches.\"\"\"\n result = SummarizedResults()\n\n for match in matches:\n # tag can include a sub-rule id: `yaml[document-start]`\n # rule.id is the generic rule id: `yaml`\n # *rule.tags is the list of the rule's tags (categories): `style`\n if match.tag not in result.tag_stats:\n result.tag_stats[match.tag] = TagStats(\n tag=match.tag, count=1, associated_tags=match.rule.tags\n )\n else:\n result.tag_stats[match.tag].count += 1\n\n if {match.tag, match.rule.id, *match.rule.tags}.isdisjoint(\n self.options.warn_list\n ):\n # not in warn_list\n if match.fixed:\n result.fixed_failures += 1\n else:\n result.failures += 1\n else:\n result.tag_stats[match.tag].warning = True\n if match.fixed:\n result.fixed_warnings += 1\n else:\n result.warnings += 1\n return result\n\n @staticmethod\n def count_lintables(files: set[Lintable]) -> tuple[int, int]:\n \"\"\"Count total and modified files.\"\"\"\n files_count = len(files)\n changed_files_count = len([file for file in files if file.updated])\n return files_count, changed_files_count\n\n @staticmethod\n def _get_matched_skippable_rules(\n matches: list[MatchError],\n ) -> dict[str, BaseRule]:\n \"\"\"Extract the list of matched rules, if skippable, from the list of matches.\"\"\"\n matches_unignored = [match for match in matches if not match.ignored]\n # match.tag is more specialized than match.rule.id\n matched_rules = {\n match.tag or match.rule.id: match.rule for match in matches_unignored\n }\n # remove unskippable rules from the list\n for rule_id in list(matched_rules.keys()):\n if \"unskippable\" in matched_rules[rule_id].tags:\n matched_rules.pop(rule_id)\n return matched_rules\n\n def report_outcome(self, result: LintResult, mark_as_success: bool = False) -> int:\n \"\"\"Display information about how to skip found rules.\n\n Returns exit code, 2 if errors were found, 0 when only warnings were found.\n \"\"\"\n msg = \"\"\n\n summary = self.count_results(result.matches)\n files_count, changed_files_count = self.count_lintables(result.files)\n\n matched_rules = self._get_matched_skippable_rules(result.matches)\n\n entries = []\n for key in sorted(matched_rules.keys()):\n if {key, *matched_rules[key].tags}.isdisjoint(self.options.warn_list):\n entries.append(f\" - {key} # {matched_rules[key].shortdesc}\\n\")\n for match in result.matches:\n if \"experimental\" in match.rule.tags:\n entries.append(\" - experimental # all rules tagged as experimental\\n\")\n break\n if entries and not self.options.quiet:\n console_stderr.print(\n \"You can skip specific rules or tags by adding them to your \"\n \"configuration file:\"\n )\n msg += \"\"\"\\\n# .config/ansible-lint.yml\nwarn_list: # or 'skip_list' to silence them completely\n\"\"\"\n msg += \"\".join(sorted(entries))\n\n # Do not deprecate the old tags just yet. Why? Because it is not currently feasible\n # to migrate old tags to new tags. There are a lot of things out there that still\n # use ansible-lint 4 (for example, Ansible Galaxy and Automation Hub imports). If we\n # replace the old tags, those tools will report warnings. 
If we do not replace them,\n # ansible-lint 5 will report warnings.\n #\n # We can do the deprecation once the ecosystem caught up at least a bit.\n # for k, v in used_old_tags.items():\n # _logger.warning(\n # \"Replaced deprecated tag '%s' with '%s' but it will become an \"\n # \"error in the future.\",\n # k,\n # v,\n # )\n\n if self.options.write_list and \"yaml\" in self.options.skip_list:\n _logger.warning(\n \"You specified '--write', but no files can be modified \"\n \"because 'yaml' is in 'skip_list'.\"\n )\n\n if mark_as_success and summary.failures and not self.options.progressive:\n mark_as_success = False\n\n if not self.options.quiet:\n console_stderr.print(render_yaml(msg))\n self.report_summary(\n summary, changed_files_count, files_count, is_success=mark_as_success\n )\n\n return SUCCESS_RC if mark_as_success else VIOLATIONS_FOUND_RC\n\n def report_summary( # pylint: disable=too-many-branches,too-many-locals\n self,\n summary: SummarizedResults,\n changed_files_count: int,\n files_count: int,\n is_success: bool,\n ) -> None:\n \"\"\"Report match and file counts.\"\"\"\n # sort the stats by profiles\n idx = 0\n rule_order = {}\n\n for profile, profile_config in PROFILES.items():\n for rule in profile_config[\"rules\"]:\n # print(profile, rule)\n rule_order[rule] = (idx, profile)\n idx += 1\n _logger.debug(\"Determined rule-profile order: %s\", rule_order)\n failed_profiles = set()\n for tag, tag_stats in summary.tag_stats.items():\n if tag in rule_order:\n tag_stats.order, tag_stats.profile = rule_order.get(tag, (idx, \"\"))\n elif \"[\" in tag:\n tag_stats.order, tag_stats.profile = rule_order.get(\n tag.split(\"[\")[0], (idx, \"\")\n )\n if tag_stats.profile:\n failed_profiles.add(tag_stats.profile)\n summary.sort()\n\n if changed_files_count:\n console_stderr.print(f\"Modified {changed_files_count} files.\")\n\n # determine which profile passed\n summary.passed_profile = \"\"\n passed_profile_count = 0\n for profile in PROFILES.keys():\n if profile in failed_profiles:\n break\n if profile != summary.passed_profile:\n summary.passed_profile = profile\n passed_profile_count += 1\n\n stars = \"\"\n if summary.tag_stats:\n table = Table(\n title=\"Rule Violation Summary\",\n collapse_padding=True,\n box=None,\n show_lines=False,\n )\n table.add_column(\"count\", justify=\"right\")\n table.add_column(\"tag\")\n table.add_column(\"profile\")\n table.add_column(\"rule associated tags\")\n for tag, stats in summary.tag_stats.items():\n table.add_row(\n str(stats.count),\n f\"[link={RULE_DOC_URL}{ tag.split('[')[0] }]{escape(tag)}[/link]\",\n stats.profile,\n f\"{', '.join(stats.associated_tags)}{' (warning)' if stats.warning else ''}\",\n style=\"yellow\" if stats.warning else \"red\",\n )\n # rate stars for the top 5 profiles (min would not get\n rating = 5 - (len(PROFILES.keys()) - passed_profile_count)\n if 0 < rating < 6:\n stars = f\", {rating}/5 star rating\"\n\n console_stderr.print(table)\n console_stderr.print()\n\n if is_success:\n msg = \"[green]Passed[/] with \"\n else:\n msg = \"[red][bold]Failed[/][/] after \"\n\n if summary.passed_profile:\n msg += f\"[bold]{summary.passed_profile}[/] profile\"\n if stars:\n msg += stars\n\n msg += f\": {summary.failures} failure(s), {summary.warnings} warning(s)\"\n if summary.fixed:\n msg += f\", and fixed {summary.fixed} issue(s)\"\n msg += f\" on {files_count} files.\"\n\n # on offline mode and when run under pre-commit we do not want to\n # check for updates.\n if not self.options.offline and os.environ.get(\"PRE_COMMIT\", 
\"0\") != \"1\":\n version_warning = get_version_warning()\n if version_warning:\n msg += f\"\\n{version_warning}\"\n\n console_stderr.print(msg)\n\n\ndef choose_formatter_factory(\n options_list: Namespace,\n) -> type[formatters.BaseFormatter[Any]]:\n \"\"\"Select an output formatter based on the incoming command line arguments.\"\"\"\n r: type[formatters.BaseFormatter[Any]] = formatters.Formatter\n if options_list.format == \"quiet\":\n r = formatters.QuietFormatter\n elif options_list.format in (\"json\", \"codeclimate\"):\n r = formatters.CodeclimateJSONFormatter\n elif options_list.format == \"sarif\":\n r = formatters.SarifFormatter\n elif options_list.parseable or options_list.format == \"pep8\":\n r = formatters.ParseableFormatter\n return r\n\n\ndef _sanitize_list_options(tag_list: list[str]) -> list[str]:\n \"\"\"Normalize list options.\"\"\"\n # expand comma separated entries\n tags = set()\n for tag in tag_list:\n tags.update(str(tag).split(\",\"))\n # remove duplicates, and return as sorted list\n return sorted(set(tags))\n\n\n@lru_cache\ndef get_app() -> App:\n \"\"\"Return the application instance, caching the return value.\"\"\"\n offline = default_options.offline\n app = App(options=default_options)\n # Make linter use the cache dir from compat\n default_options.cache_dir = app.runtime.cache_dir\n\n role_name_check = 0\n if \"role-name\" in app.options.warn_list:\n role_name_check = 1\n elif \"role-name\" in app.options.skip_list:\n role_name_check = 2\n\n # mocking must happen before prepare_environment or galaxy install might\n # fail.\n _perform_mockings()\n app.runtime.prepare_environment(\n install_local=True, offline=offline, role_name_check=role_name_check\n )\n\n return app\n",
"path": "src/ansiblelint/app.py"
}
] | [
{
"content": "\"\"\"Application.\"\"\"\nfrom __future__ import annotations\n\nimport itertools\nimport logging\nimport os\nfrom functools import lru_cache\nfrom typing import TYPE_CHECKING, Any\n\nfrom ansible_compat.runtime import Runtime\nfrom rich.markup import escape\nfrom rich.table import Table\n\nfrom ansiblelint import formatters\nfrom ansiblelint._mockings import _perform_mockings\nfrom ansiblelint.color import console, console_stderr, render_yaml\nfrom ansiblelint.config import PROFILES, get_version_warning\nfrom ansiblelint.config import options as default_options\nfrom ansiblelint.constants import RULE_DOC_URL, SUCCESS_RC, VIOLATIONS_FOUND_RC\nfrom ansiblelint.errors import MatchError\nfrom ansiblelint.stats import SummarizedResults, TagStats\n\nif TYPE_CHECKING:\n from argparse import Namespace\n from typing import Dict, Set # pylint: disable=ungrouped-imports\n\n from ansiblelint._internal.rules import BaseRule\n from ansiblelint.file_utils import Lintable\n from ansiblelint.runner import LintResult\n\n\n_logger = logging.getLogger(__package__)\n\n\nclass App:\n \"\"\"App class represents an execution of the linter.\"\"\"\n\n def __init__(self, options: Namespace):\n \"\"\"Construct app run based on already loaded configuration.\"\"\"\n options.skip_list = _sanitize_list_options(options.skip_list)\n options.warn_list = _sanitize_list_options(options.warn_list)\n\n self.options = options\n\n formatter_factory = choose_formatter_factory(options)\n self.formatter = formatter_factory(options.cwd, options.display_relative_path)\n\n self.runtime = Runtime(isolated=True)\n\n def render_matches(self, matches: list[MatchError]) -> None:\n \"\"\"Display given matches (if they are not fixed).\"\"\"\n matches = [match for match in matches if not match.fixed]\n\n if isinstance(\n self.formatter,\n (formatters.CodeclimateJSONFormatter, formatters.SarifFormatter),\n ):\n # If formatter CodeclimateJSONFormatter or SarifFormatter is chosen,\n # then print only the matches in JSON\n console.print(\n self.formatter.format_result(matches), markup=False, highlight=False\n )\n return\n\n ignored_matches = [match for match in matches if match.ignored]\n fatal_matches = [match for match in matches if not match.ignored]\n # Displayed ignored matches first\n if ignored_matches:\n _logger.warning(\n \"Listing %s violation(s) marked as ignored, likely already known\",\n len(ignored_matches),\n )\n for match in ignored_matches:\n if match.ignored:\n # highlight must be off or apostrophes may produce unexpected results\n console.print(self.formatter.format(match), highlight=False)\n if fatal_matches:\n _logger.warning(\n \"Listing %s violation(s) that are fatal\", len(fatal_matches)\n )\n for match in fatal_matches:\n if not match.ignored:\n console.print(self.formatter.format(match), highlight=False)\n\n # If run under GitHub Actions we also want to emit output recognized by it.\n if os.getenv(\"GITHUB_ACTIONS\") == \"true\" and os.getenv(\"GITHUB_WORKFLOW\"):\n formatter = formatters.AnnotationsFormatter(self.options.cwd, True)\n for match in itertools.chain(fatal_matches, ignored_matches):\n console.print(formatter.format(match), markup=False, highlight=False)\n\n # If sarif_file is set, we also dump the results to a sarif file.\n if self.options.sarif_file:\n sarif = formatters.SarifFormatter(self.options.cwd, True)\n json = sarif.format_result(matches)\n with open(self.options.sarif_file, \"w\", encoding=\"utf-8\") as sarif_file:\n sarif_file.write(json)\n\n def count_results(self, matches: 
list[MatchError]) -> SummarizedResults:\n \"\"\"Count failures and warnings in matches.\"\"\"\n result = SummarizedResults()\n\n for match in matches:\n # tag can include a sub-rule id: `yaml[document-start]`\n # rule.id is the generic rule id: `yaml`\n # *rule.tags is the list of the rule's tags (categories): `style`\n if match.tag not in result.tag_stats:\n result.tag_stats[match.tag] = TagStats(\n tag=match.tag, count=1, associated_tags=match.rule.tags\n )\n else:\n result.tag_stats[match.tag].count += 1\n\n if {match.tag, match.rule.id, *match.rule.tags}.isdisjoint(\n self.options.warn_list\n ):\n # not in warn_list\n if match.fixed:\n result.fixed_failures += 1\n else:\n result.failures += 1\n else:\n result.tag_stats[match.tag].warning = True\n if match.fixed:\n result.fixed_warnings += 1\n else:\n result.warnings += 1\n return result\n\n @staticmethod\n def count_lintables(files: set[Lintable]) -> tuple[int, int]:\n \"\"\"Count total and modified files.\"\"\"\n files_count = len(files)\n changed_files_count = len([file for file in files if file.updated])\n return files_count, changed_files_count\n\n @staticmethod\n def _get_matched_skippable_rules(\n matches: list[MatchError],\n ) -> dict[str, BaseRule]:\n \"\"\"Extract the list of matched rules, if skippable, from the list of matches.\"\"\"\n matches_unignored = [match for match in matches if not match.ignored]\n # match.tag is more specialized than match.rule.id\n matched_rules = {\n match.tag or match.rule.id: match.rule for match in matches_unignored\n }\n # remove unskippable rules from the list\n for rule_id in list(matched_rules.keys()):\n if \"unskippable\" in matched_rules[rule_id].tags:\n matched_rules.pop(rule_id)\n return matched_rules\n\n def report_outcome(self, result: LintResult, mark_as_success: bool = False) -> int:\n \"\"\"Display information about how to skip found rules.\n\n Returns exit code, 2 if errors were found, 0 when only warnings were found.\n \"\"\"\n msg = \"\"\n\n summary = self.count_results(result.matches)\n files_count, changed_files_count = self.count_lintables(result.files)\n\n matched_rules = self._get_matched_skippable_rules(result.matches)\n\n entries = []\n for key in sorted(matched_rules.keys()):\n if {key, *matched_rules[key].tags}.isdisjoint(self.options.warn_list):\n entries.append(f\" - {key} # {matched_rules[key].shortdesc}\\n\")\n for match in result.matches:\n if \"experimental\" in match.rule.tags:\n entries.append(\" - experimental # all rules tagged as experimental\\n\")\n break\n if entries and not self.options.quiet:\n console_stderr.print(\n \"You can skip specific rules or tags by adding them to your \"\n \"configuration file:\"\n )\n msg += \"\"\"\\\n# .config/ansible-lint.yml\nwarn_list: # or 'skip_list' to silence them completely\n\"\"\"\n msg += \"\".join(sorted(entries))\n\n # Do not deprecate the old tags just yet. Why? Because it is not currently feasible\n # to migrate old tags to new tags. There are a lot of things out there that still\n # use ansible-lint 4 (for example, Ansible Galaxy and Automation Hub imports). If we\n # replace the old tags, those tools will report warnings. 
If we do not replace them,\n # ansible-lint 5 will report warnings.\n #\n # We can do the deprecation once the ecosystem caught up at least a bit.\n # for k, v in used_old_tags.items():\n # _logger.warning(\n # \"Replaced deprecated tag '%s' with '%s' but it will become an \"\n # \"error in the future.\",\n # k,\n # v,\n # )\n\n if self.options.write_list and \"yaml\" in self.options.skip_list:\n _logger.warning(\n \"You specified '--write', but no files can be modified \"\n \"because 'yaml' is in 'skip_list'.\"\n )\n\n if mark_as_success and summary.failures and not self.options.progressive:\n mark_as_success = False\n\n if not self.options.quiet:\n console_stderr.print(render_yaml(msg))\n self.report_summary(\n summary, changed_files_count, files_count, is_success=mark_as_success\n )\n\n return SUCCESS_RC if mark_as_success else VIOLATIONS_FOUND_RC\n\n def report_summary( # pylint: disable=too-many-branches,too-many-locals\n self,\n summary: SummarizedResults,\n changed_files_count: int,\n files_count: int,\n is_success: bool,\n ) -> None:\n \"\"\"Report match and file counts.\"\"\"\n # sort the stats by profiles\n idx = 0\n rule_order = {}\n\n for profile, profile_config in PROFILES.items():\n for rule in profile_config[\"rules\"]:\n # print(profile, rule)\n rule_order[rule] = (idx, profile)\n idx += 1\n _logger.debug(\"Determined rule-profile order: %s\", rule_order)\n failed_profiles = set()\n for tag, tag_stats in summary.tag_stats.items():\n if tag in rule_order:\n tag_stats.order, tag_stats.profile = rule_order.get(tag, (idx, \"\"))\n elif \"[\" in tag:\n tag_stats.order, tag_stats.profile = rule_order.get(\n tag.split(\"[\")[0], (idx, \"\")\n )\n if tag_stats.profile:\n failed_profiles.add(tag_stats.profile)\n summary.sort()\n\n if changed_files_count:\n console_stderr.print(f\"Modified {changed_files_count} files.\")\n\n # determine which profile passed\n summary.passed_profile = \"\"\n passed_profile_count = 0\n for profile in PROFILES.keys():\n if profile in failed_profiles:\n break\n if profile != summary.passed_profile:\n summary.passed_profile = profile\n passed_profile_count += 1\n\n stars = \"\"\n if summary.tag_stats:\n table = Table(\n title=\"Rule Violation Summary\",\n collapse_padding=True,\n box=None,\n show_lines=False,\n )\n table.add_column(\"count\", justify=\"right\")\n table.add_column(\"tag\")\n table.add_column(\"profile\")\n table.add_column(\"rule associated tags\")\n for tag, stats in summary.tag_stats.items():\n table.add_row(\n str(stats.count),\n f\"[link={RULE_DOC_URL}{ tag.split('[')[0] }]{escape(tag)}[/link]\",\n stats.profile,\n f\"{', '.join(stats.associated_tags)}{' (warning)' if stats.warning else ''}\",\n style=\"yellow\" if stats.warning else \"red\",\n )\n # rate stars for the top 5 profiles (min would not get\n rating = 5 - (len(PROFILES.keys()) - passed_profile_count)\n if 0 < rating < 6:\n stars = f\", {rating}/5 star rating\"\n\n console_stderr.print(table)\n console_stderr.print()\n\n if is_success:\n msg = \"[green]Passed[/] with \"\n else:\n msg = \"[red][bold]Failed[/][/] after \"\n\n if summary.passed_profile:\n msg += f\"[bold]{summary.passed_profile}[/] profile\"\n if stars:\n msg += stars\n\n msg += f\": {summary.failures} failure(s), {summary.warnings} warning(s)\"\n if summary.fixed:\n msg += f\", and fixed {summary.fixed} issue(s)\"\n msg += f\" on {files_count} files.\"\n\n # on offline mode and when run under pre-commit we do not want to\n # check for updates.\n if not self.options.offline and os.environ.get(\"PRE_COMMIT\", 
\"0\") != \"1\":\n version_warning = get_version_warning()\n if version_warning:\n msg += f\"\\n{version_warning}\"\n\n console_stderr.print(msg)\n\n\ndef choose_formatter_factory(\n options_list: Namespace,\n) -> type[formatters.BaseFormatter[Any]]:\n \"\"\"Select an output formatter based on the incoming command line arguments.\"\"\"\n r: type[formatters.BaseFormatter[Any]] = formatters.Formatter\n if options_list.format == \"quiet\":\n r = formatters.QuietFormatter\n elif options_list.format in (\"json\", \"codeclimate\"):\n r = formatters.CodeclimateJSONFormatter\n elif options_list.format == \"sarif\":\n r = formatters.SarifFormatter\n elif options_list.parseable or options_list.format == \"pep8\":\n r = formatters.ParseableFormatter\n return r\n\n\ndef _sanitize_list_options(tag_list: list[str]) -> list[str]:\n \"\"\"Normalize list options.\"\"\"\n # expand comma separated entries\n tags = set()\n for tag in tag_list:\n tags.update(str(tag).split(\",\"))\n # remove duplicates, and return as sorted list\n return sorted(set(tags))\n\n\n@lru_cache\ndef get_app() -> App:\n \"\"\"Return the application instance, caching the return value.\"\"\"\n offline = default_options.offline\n app = App(options=default_options)\n # Make linter use the cache dir from compat\n default_options.cache_dir = app.runtime.cache_dir\n\n role_name_check = 0\n if \"role-name\" in app.options.warn_list:\n role_name_check = 1\n elif \"role-name\" in app.options.skip_list:\n role_name_check = 2\n\n # mocking must happen before prepare_environment or galaxy install might\n # fail.\n _perform_mockings()\n app.runtime.prepare_environment(\n install_local=(not offline), offline=offline, role_name_check=role_name_check\n )\n\n return app\n",
"path": "src/ansiblelint/app.py"
}
] | diff --git a/src/ansiblelint/app.py b/src/ansiblelint/app.py
index 1b49611801..ef74d473af 100644
--- a/src/ansiblelint/app.py
+++ b/src/ansiblelint/app.py
@@ -352,7 +352,7 @@ def get_app() -> App:
# fail.
_perform_mockings()
app.runtime.prepare_environment(
- install_local=True, offline=offline, role_name_check=role_name_check
+ install_local=(not offline), offline=offline, role_name_check=role_name_check
)
return app
|
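A hedged, self-contained sketch of the invariant the diff above restores: local content may only be installed when the run is online. The helper name below is illustrative, not part of ansible-lint.

```python
def resolve_install_local(offline: bool) -> bool:
    """Local collections may only be installed when the run is online."""
    return not offline


# Before the fix, install_local was hard-coded to True, so an offline run
# still attempted the install; deriving it from the offline flag keeps the
# two settings consistent.
assert resolve_install_local(offline=True) is False
assert resolve_install_local(offline=False) is True
```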
fidals__shopelectro-870 | Add absolute URLs to the canonical links. stb2
The construction of canonical links on the site needs to be fixed.
The URL in a canonical link must always be absolute:
<link rel="canonical" href="https://www.сайт.ру/адрес_страницы" >
not like this:
<link rel="canonical" href="/адрес_страницы" > - this is incorrect.
Search engines ignore this tag if it contains a relative URL.
When I crawl the site I see many duplicate pages (pagination): the canonical tag is present in the markup, yet a crawl that mimics a search robot does not mark these pages as canonical.
The same fix is most likely needed on STB as well.
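For illustration, a minimal Django context-processor sketch that exposes the absolute origin to templates, mirroring the `shopelectro.context_processors.shop` processor in this record; it assumes `settings.BASE_URL` holds the scheme and host (e.g. `https://www.shopelectro.ru`):

```python
from django.conf import settings


def base_url(request):
    """Expose the absolute site origin so templates can render
    <link rel="canonical" href="{{ base_url }}{{ request.path }}">."""
    return {'base_url': settings.BASE_URL}
```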
| [
{
"content": "from django.conf import settings\n\n\ndef shop(request):\n \"\"\"\n Inject shop dict into request.\n\n Shop dict contains information about shop:\n emails, phones, API-integrations.\n \"\"\"\n return {\n 'shop': settings.SHOP,\n 'DEBUG': settings.DEBUG,\n 'BASE_URL': settings.BASE_URL,\n 'SENTRY_FRONT_DSN': settings.SENTRY_FRONT_DSN,\n }\n",
"path": "shopelectro/context_processors.py"
}
] | [
{
"content": "from django.conf import settings\n\n\ndef shop(request):\n \"\"\"\n Inject shop dict into request.\n\n Shop dict contains information about shop:\n emails, phones, API-integrations.\n \"\"\"\n return {\n 'shop': settings.SHOP,\n 'DEBUG': settings.DEBUG,\n 'base_url': settings.BASE_URL,\n 'SENTRY_FRONT_DSN': settings.SENTRY_FRONT_DSN,\n }\n",
"path": "shopelectro/context_processors.py"
}
] | diff --git a/requirements.txt b/requirements.txt
index d273a63b..be06e56c 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -25,4 +25,4 @@ sorl-thumbnail==12.5.0
python-telegram-bot==11.1.0
sentry-sdk==0.7.2
https://github.com/selwin/django-user_agents/archive/master.zip
-https://github.com/fidals/refarm-site/archive/0.5.12.zip
+https://github.com/fidals/refarm-site/archive/0.6.0.zip
diff --git a/shopelectro/context_processors.py b/shopelectro/context_processors.py
index 1277410d..8a48e847 100644
--- a/shopelectro/context_processors.py
+++ b/shopelectro/context_processors.py
@@ -11,6 +11,6 @@ def shop(request):
return {
'shop': settings.SHOP,
'DEBUG': settings.DEBUG,
- 'BASE_URL': settings.BASE_URL,
+ 'base_url': settings.BASE_URL,
'SENTRY_FRONT_DSN': settings.SENTRY_FRONT_DSN,
}
diff --git a/shopelectro/tests/tests_selenium_mobile.py b/shopelectro/tests/tests_selenium_mobile.py
index 0ca82f81..24f27c76 100644
--- a/shopelectro/tests/tests_selenium_mobile.py
+++ b/shopelectro/tests/tests_selenium_mobile.py
@@ -4,6 +4,8 @@
If you need to create new test-suite, subclass it from SeleniumTestCase class.
Every Selenium-based test suite uses fixture called dump.json.
"""
+import unittest
+
from django.conf import settings
from django.test import LiveServerTestCase, override_settings, tag
from django.urls import reverse
@@ -11,8 +13,8 @@
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
-from shopelectro.selenium import SiteDriver
from shopelectro.models import Category, Product
+from shopelectro.selenium import SiteDriver
class MobileSeleniumTestCase(LiveServerTestCase):
@@ -64,6 +66,8 @@ def wait_page_loading(self):
)
)
+ # @todo #850:60m Fix Mobile.search tests group.
+ # search_input property doesn't input content.
@property
def search_input(self):
return self.wait.until(EC.visibility_of_element_located(
@@ -81,6 +85,8 @@ def submit_search(self):
)).click()
self.wait.until(EC.url_contains('/search/'))
+ # waiting self.search_input fix
+ @unittest.skip
def test_ui(self):
"""
Test mobile ui.
@@ -93,10 +99,15 @@ def test_ui(self):
self.toggle_menu()
self.assertTrue(self.search_input.is_displayed())
+ # waiting self.search_input fix
+ @unittest.skip
def test_search_autocomplete(self):
"""Autocomplete in mobile search should work."""
self.toggle_menu()
+ print('before input')
+
self.search_input.send_keys('Cate')
+ print('after input')
suggestions = self.wait.until(EC.visibility_of_any_elements_located(
(By.CLASS_NAME, 'autocomplete-suggestion')
))
@@ -104,6 +115,8 @@ def test_search_autocomplete(self):
for item in suggestions[:-1]:
self.assertTrue(item.get_attribute('data-val') == 'Cate')
+ # waiting self.search_input fix
+ @unittest.skip
def test_search_submit(self):
"""Mobile search form has submit button."""
self.toggle_menu()
diff --git a/shopelectro/tests/tests_views.py b/shopelectro/tests/tests_views.py
index c78c0a54..d5951e12 100644
--- a/shopelectro/tests/tests_views.py
+++ b/shopelectro/tests/tests_views.py
@@ -6,6 +6,7 @@
"""
import json
from functools import partial
+from itertools import chain
from operator import attrgetter
from urllib.parse import urlparse, quote
from xml.etree import ElementTree as ET
@@ -17,17 +18,15 @@
from django.test import override_settings, TestCase, tag
from django.urls import reverse
from django.utils.translation import ugettext as _
-from itertools import chain
from catalog.helpers import reverse_catalog_url
-from pages.urls import reverse_custom_page
from pages.models import CustomPage
-
+from pages.urls import reverse_custom_page
from shopelectro import models
from shopelectro import views
from shopelectro.views.service import generate_md5_for_ya_kassa, YANDEX_REQUEST_PARAM
-CANONICAL_HTML_TAG = '<link rel="canonical" href="{path}">'
+CANONICAL_HTML_TAG = '<link rel="canonical" href="{base_url}{path}">'
def get_page_number(response):
@@ -494,7 +493,10 @@ def test_canonical_meta_tag(self):
self.assertEqual(response.status_code, 200)
self.assertContains(
response,
- CANONICAL_HTML_TAG.format(path=path),
+ CANONICAL_HTML_TAG.format(
+ base_url=settings.BASE_URL,
+ path=path
+ ),
)
def test_tags_pagination_has_canonical_links(self):
@@ -519,7 +521,10 @@ def test_tags_pagination_has_canonical_links(self):
)
response = self.client.get(paginated_url)
self.assertContains(
- response, CANONICAL_HTML_TAG.format(path=not_paginated_url)
+ response, CANONICAL_HTML_TAG.format(
+ base_url=settings.BASE_URL,
+ path=not_paginated_url
+ )
)
def test_category_matrix_page(self):
diff --git a/templates/catalog/catalog.html b/templates/catalog/catalog.html
index 00224611..39308f41 100644
--- a/templates/catalog/catalog.html
+++ b/templates/catalog/catalog.html
@@ -4,7 +4,7 @@
{% load pages_extras %}
{% block content %}
- {% breadcrumbs_with_siblings page '' BASE_URL %}
+ {% breadcrumbs_with_siblings page '' base_url %}
<h1>{{ page.display.h1 }}</h1>
<div class="container container-fluid">
diff --git a/templates/catalog/category.html b/templates/catalog/category.html
index a1a94c9f..d0a73f71 100644
--- a/templates/catalog/category.html
+++ b/templates/catalog/category.html
@@ -4,7 +4,7 @@
{% load se_extras %}
{% block content %}
- {% breadcrumbs_with_siblings page '' BASE_URL %}
+ {% breadcrumbs_with_siblings page '' base_url %}
<h1 class="category-title" data-name="{{ category.name }}">{{ page.display.h1|capfirst }}</h1>
<div class="row overflow-anchor-control">
diff --git a/templates/catalog/product.html b/templates/catalog/product.html
index 47153df0..e2efc950 100644
--- a/templates/catalog/product.html
+++ b/templates/catalog/product.html
@@ -21,7 +21,7 @@
href="https://schema.org/{{ product.in_stock | yesno:'InStock,PreOrder' }}">
</div>
- {% breadcrumbs_with_siblings page '' BASE_URL %}
+ {% breadcrumbs_with_siblings page '' base_url %}
<h1 class="product-h1" itemprop="name">{{ product.page.display.h1 }}</h1>
<span class="product-article">Арт. {{ product.vendor_code }}</span>
diff --git a/templates/layout/base.html b/templates/layout/base.html
index f01f6a98..1b4b948d 100644
--- a/templates/layout/base.html
+++ b/templates/layout/base.html
@@ -8,7 +8,7 @@
<head>
{% include 'layout/google_tag_manager.html' with DEBUG=DEBUG is_head_tag=True %}
{% if page %}
- {% include 'layout/metadata.html' with page=page request=request paginated=paginated only %}
+ {% include 'layout/metadata.html' with page=page request=request paginated=paginated base_url=base_url only %}
{% endif %}
{% block stylesheets %}
<link rel="stylesheet" href="{% static 'css/styles.min.css' %}">
diff --git a/templates/layout/metadata.html b/templates/layout/metadata.html
index 5ff07b6b..1e25f1b4 100644
--- a/templates/layout/metadata.html
+++ b/templates/layout/metadata.html
@@ -38,12 +38,12 @@
<meta name="cmsmagazine" content="8a67cdaf9ded6448bd3626abd67b56e4">
{# request path is current path, but without http url's query string #}
-<link rel="canonical" href="{{ request.path }}">
+<link rel="canonical" href="{{ base_url }}{{ request.path }}">
{% if paginated.page.has_previous %}
- <link rel="prev" href="{{ request.path }}{% if paginated.page.previous_page_number > 1 %}?page={{ paginated.page.previous_page_number }}{% endif %}">
+ <link rel="prev" href="{{ base_url }}{{ request.path }}{% if paginated.page.previous_page_number > 1 %}?page={{ paginated.page.previous_page_number }}{% endif %}">
{% endif %}
{% if paginated.page.has_next %}
- <link rel="next" href="{{ request.path }}?page={{ paginated.page.next_page_number }}">
+ <link rel="next" href="{{ base_url }}{{ request.path }}?page={{ paginated.page.next_page_number }}">
{% endif %}
<link rel="icon" type="image/x-icon" href="{% static 'images/favicon.ico' %}">
|
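To make the intent of the template changes above concrete, a small illustrative check of the absolute links the updated `templates/layout/metadata.html` should render; the URL and page number below are hypothetical:

```python
base_url = "https://www.shopelectro.ru"  # assumed value of settings.BASE_URL
path = "/catalog/categories/power-tools/"
page = 2  # current page of a paginated listing

canonical = f'<link rel="canonical" href="{base_url}{path}">'
# Page 2 points back to the unpaginated first page, so rel=prev has no query.
prev_link = f'<link rel="prev" href="{base_url}{path}">'
next_link = f'<link rel="next" href="{base_url}{path}?page={page + 1}">'

# Every link is now absolute, which is what search engines require.
assert all('https://' in link for link in (canonical, prev_link, next_link))
```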
google-deepmind__optax-369 | `noisy_sgd` adds noise after scaling by the learning rate.
Thanks for the effort in this awesome library!
According to [these lines](https://github.com/deepmind/optax/blob/master/optax/_src/alias.py#L408#L435), the noise is added after the update has been scaled by the learning rate; that is, the noise magnitude is independent of the learning rate. Is this an intentional design choice?
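A minimal sketch of the two chaining orders using the public `optax` API (the learning rate and noise parameters below are arbitrary):

```python
import optax

# Current alias: scaling by -learning_rate runs first, so the noise that
# add_noise injects afterwards is independent of the learning rate.
noise_after_lr = optax.chain(
    optax.scale(-0.1),               # -learning_rate
    optax.add_noise(0.01, 0.55, 0),  # eta, gamma, seed
)

# Reversed order: the gradient is perturbed first and the noisy update is
# then scaled, so the injected noise shrinks together with the learning rate.
noise_before_lr = optax.chain(
    optax.add_noise(0.01, 0.55, 0),
    optax.scale(-0.1),
)
```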
| [
{
"content": "# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Aliases for popular optimisers.\"\"\"\n\nfrom typing import Any, Callable, Optional, Union\n\nimport jax.numpy as jnp\n\nfrom optax._src import base\nfrom optax._src import clipping\nfrom optax._src import combine\nfrom optax._src import factorized\nfrom optax._src import privacy\nfrom optax._src import transform\nfrom optax._src import wrappers\n\n\nScalarOrSchedule = Union[float, base.Schedule]\nMaskOrFn = Optional[Union[Any, Callable[[base.Params], Any]]]\n\n\ndef _scale_by_learning_rate(learning_rate: ScalarOrSchedule, flip_sign=True):\n m = -1 if flip_sign else 1\n if callable(learning_rate):\n return transform.scale_by_schedule(lambda count: m * learning_rate(count))\n return transform.scale(m * learning_rate)\n\n\ndef adabelief(\n learning_rate: ScalarOrSchedule,\n b1: float = 0.9,\n b2: float = 0.999,\n eps: float = 1e-16,\n eps_root: float = 1e-16) -> base.GradientTransformation:\n \"\"\"The AdaBelief optimiser.\n\n AdaBelief is an adaptive learning rate optimiser that focuses on fast\n convergence, generalisation, and stability. It adapts the step size depending\n on its \"belief\" in the gradient direction — the optimiser adaptively scales\n the step size by the difference between the predicted and observed gradients.\n AdaBelief is a modified version of Adam and contains the same number of\n parameters.\n\n References:\n Zhuang et al, 2020: https://arxiv.org/abs/2010.07468\n\n Args:\n learning_rate: this is a fixed global scaling factor.\n b1: the exponential decay rate to track the first moment of past gradients.\n b2: the exponential decay rate to track the second moment of past gradients.\n eps: term added to the denominator to improve numerical stability.\n eps_root: term added to the second moment of the prediction error to\n improve numerical stability. If backpropagating gradients through the\n gradient transformation (e.g. for meta-learning), this must be non-zero.\n\n Returns:\n the corresponding `GradientTransformation`.\n \"\"\"\n return combine.chain(\n transform.scale_by_belief(b1=b1, b2=b2, eps=eps, eps_root=eps_root),\n _scale_by_learning_rate(learning_rate),\n )\n\n\ndef adafactor(\n learning_rate: Optional[ScalarOrSchedule] = None,\n min_dim_size_to_factor: int = 128,\n decay_rate: float = 0.8,\n decay_offset: int = 0,\n multiply_by_parameter_scale: float = True,\n clipping_threshold: Optional[float] = 1.0,\n momentum: Optional[float] = None,\n dtype_momentum: Any = jnp.float32,\n weight_decay_rate: Optional[float] = None,\n eps: float = 1e-30,\n factored: bool = True,\n weight_decay_mask: MaskOrFn = None,\n ) -> base.GradientTransformation:\n \"\"\"The Adafactor optimiser.\n\n Adafactor is an adaptive learning rate optimiser that focuses on fast\n training of large scale neural networks. 
It saves memory by using a factored\n estimate of the second order moments used to scale gradients.\n\n References:\n Shazeer and Stern, 2018: https://arxiv.org/abs/1804.04235\n\n Args:\n learning_rate: (float) a step size. Note: the natural scale for\n Adafactor's LR is markedly different from Adam, one doesn't use the\n 1/sqrt(hidden) correction for this optim with attention-based models.\n min_dim_size_to_factor: (int) only factor the statistics if two array\n dimensions have at least this size.\n decay_rate: (float) controls second-moment exponential decay schedule.\n decay_offset: (int) for finetuning, one may set this to the starting\n step number of the finetuning phase.\n multiply_by_parameter_scale: (bool): if True, then scale learning_rate by\n parameter norm. if False, provided learning_rate is absolute step size.\n clipping_threshold: (float>=1) optional value; if None, clipping disabled.\n momentum: (float) optional value between 0 and 1, enables\n momentum and uses extra memory if non-None! None by default.\n dtype_momentum: (dtype) dtype of momentum buffers.\n weight_decay_rate: (float) optional rate at which to decay weights.\n eps: (float) regularization constant for root mean squared gradient.\n factored: (bool) whether to use factored second-moment estimates.\n weight_decay_mask: a tree with same structure as (or a prefix of)\n the params PyTree, or a Callable that returns such a pytree given\n the params/updates. The leaves should be booleans, `True`\n for leaves/subtrees you want to apply the transformation to,\n and `False` for those you want to skip.\n\n Returns:\n the corresponding `GradientTransformation`.\n \"\"\"\n # The core of the algorithm is a procedure for rescaling gradients\n # by a factored estimate of the root mean squared gradients.\n # This reduces memory compared to algorithms such as Adam or RmsProp,\n # by not having to hold a separate estimate for each weight.\n tx = [\n factorized.scale_by_factored_rms(\n factored, decay_rate, decay_offset, min_dim_size_to_factor, eps)]\n # This basic rescaling is typically combined with one or more of the following\n # transformation (all can be disabled via adafactor's constructor args).\n if clipping_threshold is not None:\n tx.append(clipping.clip_by_block_rms(clipping_threshold))\n if learning_rate is not None:\n tx.append(_scale_by_learning_rate(learning_rate, flip_sign=False))\n if multiply_by_parameter_scale:\n tx.append(transform.scale_by_param_block_rms())\n if momentum is not None:\n tx.append(\n transform.ema(momentum, debias=False, accumulator_dtype=dtype_momentum))\n if weight_decay_rate is not None:\n tx.append(transform.add_decayed_weights(\n weight_decay_rate, mask=weight_decay_mask))\n # In gradient \"descent\" we follow the negative gradient.\n tx.append(transform.scale(-1))\n return combine.chain(*tx)\n\n\ndef adagrad(\n learning_rate: ScalarOrSchedule,\n initial_accumulator_value: float = 0.1,\n eps: float = 1e-7\n) -> base.GradientTransformation:\n \"\"\"The Adagrad optimizer.\n\n Adagrad is an algorithm for gradient based optimisation that anneals the\n learning rate for each parameter during the course of training.\n\n WARNING: Adagrad's main limit is the monotonic accumulation of squared\n gradients in the denominator: since all terms are >0, the sum keeps growing\n during training and the learning rate eventually becomes vanishingly small.\n\n References:\n Duchi et al, 2011: https://jmlr.org/papers/v12/duchi11a.html\n\n Args:\n learning_rate: this is a fixed global scaling factor.\n 
initial_accumulator_value: initialisation for the accumulator.\n eps: a small constant applied to denominator inside of the square root\n (as in RMSProp) to avoid dividing by zero when rescaling.\n\n Returns:\n the corresponding `GradientTransformation`.\n \"\"\"\n return combine.chain(\n transform.scale_by_rss(\n initial_accumulator_value=initial_accumulator_value, eps=eps),\n _scale_by_learning_rate(learning_rate),\n )\n\n\ndef adam(\n learning_rate: ScalarOrSchedule,\n b1: float = 0.9,\n b2: float = 0.999,\n eps: float = 1e-8,\n eps_root: float = 0.0,\n mu_dtype: Optional[Any] = None,\n) -> base.GradientTransformation:\n \"\"\"The classic Adam optimiser.\n\n Adam is an SGD variant with learning rate adaptation. The `learning_rate`\n used for each weight is computed from estimates of first- and second-order\n moments of the gradients (using suitable exponential moving averages).\n\n References:\n Kingma et al, 2014: https://arxiv.org/abs/1412.6980\n\n Args:\n learning_rate: this is a fixed global scaling factor.\n b1: the exponential decay rate to track the first moment of past gradients.\n b2: the exponential decay rate to track the second moment of past gradients.\n eps: a small constant applied to denominator outside of the square root\n (as in the Adam paper) to avoid dividing by zero when rescaling.\n eps_root: (default `0`), a small constant applied to denominator inside the\n square root (as in RMSProp), to avoid dividing by zero when rescaling.\n This is needed for example when computing (meta-)gradients through Adam.\n mu_dtype: optional `dtype` to be used for the first order accumulator; if\n `None` then the `dtype` is inferred from `params` and `updates`.\n\n Returns:\n the corresponding `GradientTransformation`.\n \"\"\"\n return combine.chain(\n transform.scale_by_adam(\n b1=b1, b2=b2, eps=eps, eps_root=eps_root, mu_dtype=mu_dtype),\n _scale_by_learning_rate(learning_rate),\n )\n\n\ndef adamw(\n learning_rate: ScalarOrSchedule,\n b1: float = 0.9,\n b2: float = 0.999,\n eps: float = 1e-8,\n eps_root: float = 0.0,\n mu_dtype: Optional[Any] = None,\n weight_decay: float = 1e-4,\n mask: Optional[Union[Any, Callable[[base.Params], Any]]] = None,\n) -> base.GradientTransformation:\n \"\"\"Adam with weight decay regularization.\n\n AdamW uses weight decay to regularise learning towards small weights, as\n this leads to better generalisation. In SGD you can also use L2 regularisation\n to implement this as an additive loss term, however L2 regularization\n does not behave as intended for adaptive gradient algorithms such as Adam.\n\n WARNING: Sometimes you may want to skip weight decay for BatchNorm scale or\n for the bias parameters. 
You can use `optax.masked` to make your own AdamW\n variant where `additive_weight_decay` is applied only to a subset of `params`.\n\n References:\n Loshchilov et al, 2019: https://arxiv.org/abs/1711.05101\n\n Args:\n learning_rate: this is a fixed global scaling factor.\n b1: the exponential decay rate to track the first moment of past gradients.\n b2: the exponential decay rate to track the second moment of past gradients.\n eps: a small constant applied to denominator outside of the square root\n (as in the Adam paper) to avoid dividing by zero when rescaling.\n eps_root: (default `0`), a small constant applied to denominator inside the\n square root (as in RMSProp), to avoid dividing by zero when rescaling.\n This is needed for instance when computing (meta-)gradients through Adam.\n mu_dtype: optional `dtype` to be used for the first order accumulator; if\n `None` then the `dtype` is inferred from `params` and `updates`.\n weight_decay: strength of the weight decay regularization. Note that this\n weight decay is multiplied with the learning rate. This is consistent\n with other frameworks such as PyTorch, but different from\n (Loshchilov et al, 2019) where the weight decay is only multiplied with\n the \"schedule multiplier\", but not the base learning rate.\n mask: a tree with same structure as (or a prefix of) the params PyTree,\n or a Callable that returns such a pytree given the params/updates.\n The leaves should be booleans, `True` for leaves/subtrees you want to\n apply the weight decay to, and `False` for those you want to skip. Note\n that the Adam gradient transformations are applied to all parameters.\n\n Returns:\n the corresponding `GradientTransformation`.\n \"\"\"\n return combine.chain(\n transform.scale_by_adam(\n b1=b1, b2=b2, eps=eps, eps_root=eps_root, mu_dtype=mu_dtype),\n transform.add_decayed_weights(weight_decay, mask),\n _scale_by_learning_rate(learning_rate),\n )\n\n\ndef fromage(\n learning_rate: float,\n min_norm: float = 1e-6\n) -> base.GradientTransformation:\n \"\"\"The Frobenius matched gradient descent (Fromage) optimiser.\n\n Fromage is a learning algorithm that does not require learning rate tuning.\n The optimiser is based on modelling neural network gradients via deep relative\n trust (a distance function on deep neural networks). Fromage is similar to the\n LARS optimiser and can work on a range of standard neural network benchmarks,\n such as natural language Transformers and generative adversarial networks.\n\n References:\n Bernstein et al, 2020: https://arxiv.org/abs/2002.03432\n\n Args:\n learning_rate: this is a fixed global scaling factor.\n min_norm: a minimum value that the norm of the gradient updates and the\n norm of the layer parameters can be clipped to to avoid dividing by zero\n when computing the trust ratio (as in the LARS paper).\n\n Returns:\n the corresponding `GradientTransformation`.\n \"\"\"\n mult = 1 / jnp.sqrt(1 + learning_rate ** 2)\n return combine.chain(\n transform.scale_by_trust_ratio(min_norm),\n _scale_by_learning_rate(learning_rate * mult),\n transform.add_decayed_weights((mult - 1)),\n )\n\n\ndef lars(\n learning_rate: ScalarOrSchedule,\n weight_decay: float = 0.,\n weight_decay_mask: MaskOrFn = True,\n trust_coefficient: float = 0.001,\n eps: float = 0.,\n trust_ratio_mask: MaskOrFn = True,\n momentum: float = 0.9,\n nesterov: bool = False,\n) -> base.GradientTransformation:\n \"\"\"The LARS optimiser.\n\n LARS is a layer-wise adaptive optimiser introduced to help scale SGD to\n larger batch sizes. 
LARS later inspired the LAMB optimiser.\n\n References:\n You et al, 2017: https://arxiv.org/abs/1708.03888\n\n Args:\n learning_rate: this is a fixed global scaling factor.\n weight_decay (default `0.`): strength of the weight decay regularization.\n weight_decay_mask: a tree with same structure as (or a prefix of) the params\n PyTree, or a Callable that returns such a pytree given the params/updates.\n The leaves should be booleans, `True` for leaves/subtrees you want to\n apply the transformation to, and `False` for those you want to skip.\n trust_coefficient: a multiplier for the trust ratio.\n eps: optional additive constant in the trust ratio denominator.\n trust_ratio_mask: a tree with same structure as (or a prefix of) the params\n PyTree, or a Callable that returns such a pytree given the params/updates.\n The leaves should be booleans, `True` for leaves/subtrees you want to\n apply the transformation to, and `False` for those you want to skip.\n momentum: the decay rate for momentum.\n nesterov: whether to use Nesterov momentum.\n\n Returns:\n the corresponding `GradientTransformation`.\n \"\"\"\n return combine.chain(\n transform.add_decayed_weights(weight_decay, mask=weight_decay_mask),\n wrappers.masked(\n inner=transform.scale_by_trust_ratio(\n trust_coefficient=trust_coefficient, eps=eps),\n mask=trust_ratio_mask),\n _scale_by_learning_rate(learning_rate),\n transform.trace(decay=momentum, nesterov=nesterov),\n )\n\n\ndef lamb(\n learning_rate: ScalarOrSchedule,\n b1: float = 0.9,\n b2: float = 0.999,\n eps: float = 1e-6,\n eps_root: float = 0.0,\n weight_decay: float = 0.,\n mask: MaskOrFn = None,\n) -> base.GradientTransformation:\n \"\"\"The LAMB optimiser.\n\n LAMB is a general purpose layer-wise adaptive large batch optimiser designed\n to provide consistent training performance across a wide range of tasks,\n including those that use attention-based models (such as Transformers) and\n ResNet-50. 
The optimiser is able to work with small and large batch sizes.\n LAMB was inspired by the LARS learning algorithm.\n\n References:\n You et al, 2019: https://arxiv.org/abs/1904.00962\n\n Args:\n learning_rate: this is a fixed global scaling factor.\n b1: the exponential decay rate to track the first moment of past gradients.\n b2: the exponential decay rate to track the second moment of past gradients.\n eps: a small constant applied to denominator outside of the square root\n (as in the Adam paper) to avoid dividing by zero when rescaling.\n eps_root: (default `0.0`), a small constant applied to denominator inside\n the square root (as in RMSProp), to avoid dividing by zero when rescaling.\n This is needed for instance when computing (meta-)gradients through Adam.\n weight_decay (default `0.`): strength of the weight decay regularization.\n mask: a tree with same structure as (or a prefix of) the params PyTree,\n or a Callable that returns such a pytree given the params/updates.\n The leaves should be booleans, `True` for leaves/subtrees you want to\n apply the transformation to, and `False` for those you want to skip.\n\n Returns:\n the corresponding `GradientTransformation`.\n \"\"\"\n return combine.chain(\n transform.scale_by_adam(b1=b1, b2=b2, eps=eps, eps_root=eps_root),\n transform.add_decayed_weights(weight_decay=weight_decay, mask=mask),\n transform.scale_by_trust_ratio(),\n _scale_by_learning_rate(learning_rate),\n )\n\n\ndef noisy_sgd(\n learning_rate: ScalarOrSchedule,\n eta: float = 0.01,\n gamma: float = 0.55,\n seed: int = 0\n) -> base.GradientTransformation:\n r\"\"\"A variant of SGD with added noise.\n\n It has been found that adding noise to the gradients can improve\n both the training error and the generalisation error in very deep networks.\n\n References:\n Neelakantan et al, 2014: https://arxiv.org/abs/1511.06807\n\n Args:\n learning_rate: this is a fixed global scaling factor.\n eta: the initial variance for the gaussian noise added to gradients.\n gamma: a parameter controlling the annealing of noise over time,\n the variance decays according to `(1+t)^-\\gamma`.\n seed: the seed for the pseudo-random generation process.\n\n Returns:\n the corresponding `GradientTransformation`.\n \"\"\"\n return combine.chain(\n _scale_by_learning_rate(learning_rate),\n transform.add_noise(eta, gamma, seed),\n )\n\n\ndef radam(\n learning_rate: ScalarOrSchedule,\n b1: float = 0.9,\n b2: float = 0.999,\n eps: float = 1e-8,\n eps_root: float = 0.0,\n threshold: float = 5.0\n) -> base.GradientTransformation:\n \"\"\"The Rectified Adam optimiser.\n\n The adaptive learning rate in Adam has undesirably large variance in early\n stages of training, due to the limited number of training samples used to\n estimate the optimiser's statistics. 
Rectified Adam addresses this issue\n by analytically reducing the large variance.\n\n References:\n Kingma et al, 2014: https://arxiv.org/abs/1412.6980\n\n Args:\n learning_rate: this is a fixed global scaling factor.\n b1: the exponential decay rate to track the first moment of past gradients.\n b2: the exponential decay rate to track the second moment of past gradients.\n eps: a small constant applied to denominator outside of the square root\n (as in the Adam paper) to avoid dividing by zero when rescaling.\n eps_root: (default `0`), a small constant applied to denominator inside the\n square root (as in RMSProp), to avoid dividing by zero when rescaling.\n This is needed for instance when computing (meta-)gradients through Adam.\n threshold: the threshold for variance tractability.\n\n Returns:\n the corresponding `GradientTransformation`.\n \"\"\"\n return combine.chain(\n transform.scale_by_radam(\n b1=b1, b2=b2, eps=eps, eps_root=eps_root, threshold=threshold),\n _scale_by_learning_rate(learning_rate),\n )\n\n\ndef rmsprop(\n learning_rate: ScalarOrSchedule,\n decay: float = 0.9,\n eps: float = 1e-8,\n initial_scale: float = 0.,\n centered: bool = False,\n momentum: Optional[float] = None,\n nesterov: bool = False\n) -> base.GradientTransformation:\n # pylint: disable=line-too-long\n \"\"\"A flexible RMSProp optimiser.\n\n RMSProp is an SGD variant with learning rate adaptation. The `learning_rate`\n used for each weight is scaled by a suitable estimate of the magnitude of the\n gradients on previous steps. Several variants of RMSProp can be found\n in the literature. This alias provides an easy to configure RMSProp\n optimiser that can be used to switch between several of these variants.\n\n References:\n Tieleman and Hinton, 2012: http://www.cs.toronto.edu/~hinton/coursera/lecture6/lec6.pdf\n Graves, 2013: https://arxiv.org/abs/1308.0850\n\n Args:\n learning_rate: this is a fixed global scaling factor.\n decay: the decay used to track the magnitude of previous gradients.\n eps: a small numerical constant to avoid dividing by zero when rescaling.\n initial_scale: (default `0.`), initialisation of accumulators tracking the\n magnitude of previous updates. PyTorch uses `0`, TF1 uses `1`. 
When\n reproducing results from a paper, verify the value used by the authors.\n centered: (default `False`), whether the second moment or the variance of\n the past gradients is used to rescale the latest gradients.\n momentum: (default `None`), the `decay` rate used by the momentum term,\n when it is set to `None`, then momentum is not used at all.\n nesterov (default `False`): whether nesterov momentum is used.\n\n Returns:\n the corresponding `GradientTransformation`.\n \"\"\"\n # pylint: enable=line-too-long\n if centered:\n return combine.chain(\n transform.scale_by_stddev(\n decay=decay, eps=eps, initial_scale=initial_scale),\n _scale_by_learning_rate(learning_rate),\n (transform.trace(decay=momentum, nesterov=nesterov)\n if momentum is not None else base.identity())\n )\n return combine.chain(\n transform.scale_by_rms(\n decay=decay, eps=eps, initial_scale=initial_scale),\n _scale_by_learning_rate(learning_rate),\n (transform.trace(decay=momentum, nesterov=nesterov)\n if momentum is not None else base.identity())\n )\n\n\ndef sgd(\n learning_rate: ScalarOrSchedule,\n momentum: Optional[float] = None,\n nesterov: bool = False,\n accumulator_dtype: Optional[Any] = None,\n) -> base.GradientTransformation:\n \"\"\"A canonical Stochastic Gradient Descent optimiser.\n\n This implements stochastic gradient descent. It also includes support for\n momentum, and nesterov acceleration, as these are standard practice when\n using stochastic gradient descent to train deep neural networks.\n\n References:\n Sutskever et al, 2013: http://proceedings.mlr.press/v28/sutskever13.pdf\n\n Args:\n learning_rate: this is a fixed global scaling factor.\n momentum: (default `None`), the `decay` rate used by the momentum term,\n when it is set to `None`, then momentum is not used at all.\n nesterov (default `False`): whether nesterov momentum is used.\n accumulator_dtype: optional `dtype` to be used for the accumulator; if\n `None` then the `dtype` is inferred from `params` and `updates`.\n\n Returns:\n A `GradientTransformation`.\n \"\"\"\n return combine.chain(\n (transform.trace(decay=momentum, nesterov=nesterov,\n accumulator_dtype=accumulator_dtype)\n if momentum is not None else base.identity()),\n _scale_by_learning_rate(learning_rate)\n )\n\n\ndef sm3(\n learning_rate: float,\n momentum: float = 0.9\n) -> base.GradientTransformation:\n \"\"\"The SM3 optimiser.\n\n SM3 (Square-root of Minima of Sums of Maxima of Squared-gradients Method) is a\n memory-efficient adaptive optimiser designed to decrease memory overhead when\n training very large models, such as the Transformer for machine translation,\n BERT for language modelling, and AmoebaNet-D for image classification. 
SM3: 1)\n applies to tensors of arbitrary dimensions and any predefined cover of the\n parameters; 2) adapts the learning rates in an adaptive and data-driven manner\n (like Adagrad and unlike Adafactor); and 3) comes with rigorous convergence\n guarantees in stochastic convex optimization settings.\n\n References:\n Anil et al, 2019: https://arxiv.org/abs/1901.11150\n\n Args:\n learning_rate: this is a fixed global scaling factor.\n momentum: the `decay` rate used by the momentum term (when it is not set to\n `None`, then momentum is not used at all).\n\n Returns:\n the corresponding `GradientTransformation`.\n \"\"\"\n return combine.chain(\n transform.scale_by_sm3(momentum),\n transform.scale(-learning_rate),\n )\n\n\ndef yogi(\n learning_rate: ScalarOrSchedule,\n b1: float = 0.9,\n b2: float = 0.999,\n eps: float = 1e-3,\n) -> base.GradientTransformation:\n \"\"\"The Yogi optimiser.\n\n Yogi is an adaptive optimiser, which provides control in tuning the effective\n learning rate to prevent it from increasing. By doing so, it focuses on\n addressing the issues of convergence and generalisation in exponential moving\n average-based adaptive methods (such as Adam and RMSprop). Yogi is a\n modification of Adam and uses the same parameters.\n\n References:\n Zaheer et al, 2020: http://www.sanjivk.com/yogi_nips2018.pdf\n\n Args:\n learning_rate: this is a fixed global scaling factor.\n b1: the exponential decay rate to track the first moment of past gradients.\n b2: the exponential decay rate to track the second moment of past gradients.\n eps: a small constant applied to denominator outside of the square root\n (as in the Adam paper) to avoid dividing by zero when rescaling.\n\n Returns:\n the corresponding `GradientTransformation`.\n \"\"\"\n return combine.chain(\n transform.scale_by_yogi(b1=b1, b2=b2, eps=eps),\n _scale_by_learning_rate(learning_rate),\n )\n\n\ndef dpsgd(\n learning_rate: ScalarOrSchedule,\n l2_norm_clip: float,\n noise_multiplier: float,\n seed: int,\n momentum: Optional[float] = None,\n nesterov: bool = False\n) -> base.GradientTransformation:\n \"\"\"The DPSGD optimiser.\n\n Differential privacy is a standard for privacy guarantees of algorithms\n learning from aggregate databases including potentially sensitive information.\n DPSGD offers protection against a strong adversary with full knowledge of the\n training mechanism and access to the model’s parameters.\n\n WARNING: This `GradientTransformation` expects input updates to have a batch\n dimension on the 0th axis. 
That is, this function expects per-example\n gradients as input (which are easy to obtain in JAX using `jax.vmap`).\n\n References:\n Abadi et al, 2016: https://arxiv.org/abs/1607.00133\n\n Args:\n learning_rate: this is a fixed global scaling factor.\n l2_norm_clip: maximum L2 norm of the per-example gradients.\n noise_multiplier: ratio of standard deviation to the clipping norm.\n seed: initial seed used for the jax.random.PRNGKey\n momentum: (default `None`), the `decay` rate used by the momentum term,\n when it is set to `None`, then momentum is not used at all.\n nesterov (default `False`): whether nesterov momentum is used.\n\n Returns:\n A `GradientTransformation`.\n \"\"\"\n return combine.chain(\n privacy.differentially_private_aggregate(\n l2_norm_clip=l2_norm_clip,\n noise_multiplier=noise_multiplier,\n seed=seed),\n (transform.trace(decay=momentum, nesterov=nesterov)\n if momentum is not None else base.identity()),\n _scale_by_learning_rate(learning_rate)\n )\n\n\ndef adamax(\n learning_rate: ScalarOrSchedule,\n b1: float = 0.9,\n b2: float = 0.999,\n eps: float = 1e-8,\n) -> base.GradientTransformation:\n \"\"\"A variant of the Adam optimizer that uses the infinity norm.\n\n References:\n Kingma et al, 2014: https://arxiv.org/abs/1412.6980\n\n Args:\n learning_rate: this is a fixed global scaling factor.\n b1: the exponential decay rate to track the first moment of past gradients.\n b2: the exponential decay rate to track the maximum of past gradients.\n eps: a small constant applied to denominator to avoid dividing by zero when\n rescaling.\n\n Returns:\n the corresponding `GradientTransformation`.\n \"\"\"\n return combine.chain(\n transform.scale_by_adamax(b1=b1, b2=b2, eps=eps,),\n _scale_by_learning_rate(learning_rate),\n )\n\n\ndef adamaxw(\n learning_rate: ScalarOrSchedule,\n b1: float = 0.9,\n b2: float = 0.999,\n eps: float = 1e-8,\n weight_decay: float = 1e-4,\n mask: Optional[Union[Any, Callable[[base.Params], Any]]] = None,\n) -> base.GradientTransformation:\n \"\"\"Adamax with weight decay regularization.\n\n AdamaxW uses weight decay to regularise learning towards small weights, as\n this leads to better generalisation. In SGD you can also use L2 regularisation\n to implement this as an additive loss term, however L2 regularization\n does not behave as intended for adaptive gradient algorithms such as Adam.\n\n WARNING: Sometimes you may want to skip weight decay for BatchNorm scale or\n for the bias parameters. You can use `optax.masked` to make your own AdamaxW\n variant where `additive_weight_decay` is applied only to a subset of `params`.\n\n References:\n Loshchilov et al, 2019: https://arxiv.org/abs/1711.05101\n\n Args:\n learning_rate: this is a fixed global scaling factor.\n b1: the exponential decay rate to track the first moment of past gradients.\n b2: the exponential decay rate to track the maximum of past gradients.\n eps: a small constant applied to denominator to avoid dividing by zero when\n rescaling.\n weight_decay: strength of the weight decay regularization. Note that this\n weight decay is multiplied with the learning rate. 
This is consistent\n with other frameworks such as PyTorch, but different from\n (Loshchilov et al, 2019) where the weight decay is only multiplied with\n the \"schedule multiplier\", but not the base learning rate.\n mask: a tree with same structure as (or a prefix of) the params PyTree,\n or a Callable that returns such a pytree given the params/updates.\n The leaves should be booleans, `True` for leaves/subtrees you want to\n apply the weight decay to, and `False` for those you want to skip. Note\n that the Adamax gradient transformations are applied to all parameters.\n\n Returns:\n the corresponding `GradientTransformation`.\n \"\"\"\n return combine.chain(\n transform.scale_by_adamax(b1=b1, b2=b2, eps=eps),\n transform.add_decayed_weights(weight_decay, mask),\n _scale_by_learning_rate(learning_rate),\n )\n",
"path": "optax/_src/alias.py"
}
] | [
{
"content": "# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Aliases for popular optimisers.\"\"\"\n\nfrom typing import Any, Callable, Optional, Union\n\nimport jax.numpy as jnp\n\nfrom optax._src import base\nfrom optax._src import clipping\nfrom optax._src import combine\nfrom optax._src import factorized\nfrom optax._src import privacy\nfrom optax._src import transform\nfrom optax._src import wrappers\n\n\nScalarOrSchedule = Union[float, base.Schedule]\nMaskOrFn = Optional[Union[Any, Callable[[base.Params], Any]]]\n\n\ndef _scale_by_learning_rate(learning_rate: ScalarOrSchedule, flip_sign=True):\n m = -1 if flip_sign else 1\n if callable(learning_rate):\n return transform.scale_by_schedule(lambda count: m * learning_rate(count))\n return transform.scale(m * learning_rate)\n\n\ndef adabelief(\n learning_rate: ScalarOrSchedule,\n b1: float = 0.9,\n b2: float = 0.999,\n eps: float = 1e-16,\n eps_root: float = 1e-16) -> base.GradientTransformation:\n \"\"\"The AdaBelief optimiser.\n\n AdaBelief is an adaptive learning rate optimiser that focuses on fast\n convergence, generalisation, and stability. It adapts the step size depending\n on its \"belief\" in the gradient direction — the optimiser adaptively scales\n the step size by the difference between the predicted and observed gradients.\n AdaBelief is a modified version of Adam and contains the same number of\n parameters.\n\n References:\n Zhuang et al, 2020: https://arxiv.org/abs/2010.07468\n\n Args:\n learning_rate: this is a fixed global scaling factor.\n b1: the exponential decay rate to track the first moment of past gradients.\n b2: the exponential decay rate to track the second moment of past gradients.\n eps: term added to the denominator to improve numerical stability.\n eps_root: term added to the second moment of the prediction error to\n improve numerical stability. If backpropagating gradients through the\n gradient transformation (e.g. for meta-learning), this must be non-zero.\n\n Returns:\n the corresponding `GradientTransformation`.\n \"\"\"\n return combine.chain(\n transform.scale_by_belief(b1=b1, b2=b2, eps=eps, eps_root=eps_root),\n _scale_by_learning_rate(learning_rate),\n )\n\n\ndef adafactor(\n learning_rate: Optional[ScalarOrSchedule] = None,\n min_dim_size_to_factor: int = 128,\n decay_rate: float = 0.8,\n decay_offset: int = 0,\n multiply_by_parameter_scale: float = True,\n clipping_threshold: Optional[float] = 1.0,\n momentum: Optional[float] = None,\n dtype_momentum: Any = jnp.float32,\n weight_decay_rate: Optional[float] = None,\n eps: float = 1e-30,\n factored: bool = True,\n weight_decay_mask: MaskOrFn = None,\n ) -> base.GradientTransformation:\n \"\"\"The Adafactor optimiser.\n\n Adafactor is an adaptive learning rate optimiser that focuses on fast\n training of large scale neural networks. 
It saves memory by using a factored\n estimate of the second order moments used to scale gradients.\n\n References:\n Shazeer and Stern, 2018: https://arxiv.org/abs/1804.04235\n\n Args:\n learning_rate: (float) a step size. Note: the natural scale for\n Adafactor's LR is markedly different from Adam, one doesn't use the\n 1/sqrt(hidden) correction for this optim with attention-based models.\n min_dim_size_to_factor: (int) only factor the statistics if two array\n dimensions have at least this size.\n decay_rate: (float) controls second-moment exponential decay schedule.\n decay_offset: (int) for finetuning, one may set this to the starting\n step number of the finetuning phase.\n multiply_by_parameter_scale: (bool): if True, then scale learning_rate by\n parameter norm. if False, provided learning_rate is absolute step size.\n clipping_threshold: (float>=1) optional value; if None, clipping disabled.\n momentum: (float) optional value between 0 and 1, enables\n momentum and uses extra memory if non-None! None by default.\n dtype_momentum: (dtype) dtype of momentum buffers.\n weight_decay_rate: (float) optional rate at which to decay weights.\n eps: (float) regularization constant for root mean squared gradient.\n factored: (bool) whether to use factored second-moment estimates.\n weight_decay_mask: a tree with same structure as (or a prefix of)\n the params PyTree, or a Callable that returns such a pytree given\n the params/updates. The leaves should be booleans, `True`\n for leaves/subtrees you want to apply the transformation to,\n and `False` for those you want to skip.\n\n Returns:\n the corresponding `GradientTransformation`.\n \"\"\"\n # The core of the algorithm is a procedure for rescaling gradients\n # by a factored estimate of the root mean squared gradients.\n # This reduces memory compared to algorithms such as Adam or RmsProp,\n # by not having to hold a separate estimate for each weight.\n tx = [\n factorized.scale_by_factored_rms(\n factored, decay_rate, decay_offset, min_dim_size_to_factor, eps)]\n # This basic rescaling is typically combined with one or more of the following\n # transformation (all can be disabled via adafactor's constructor args).\n if clipping_threshold is not None:\n tx.append(clipping.clip_by_block_rms(clipping_threshold))\n if learning_rate is not None:\n tx.append(_scale_by_learning_rate(learning_rate, flip_sign=False))\n if multiply_by_parameter_scale:\n tx.append(transform.scale_by_param_block_rms())\n if momentum is not None:\n tx.append(\n transform.ema(momentum, debias=False, accumulator_dtype=dtype_momentum))\n if weight_decay_rate is not None:\n tx.append(transform.add_decayed_weights(\n weight_decay_rate, mask=weight_decay_mask))\n # In gradient \"descent\" we follow the negative gradient.\n tx.append(transform.scale(-1))\n return combine.chain(*tx)\n\n\ndef adagrad(\n learning_rate: ScalarOrSchedule,\n initial_accumulator_value: float = 0.1,\n eps: float = 1e-7\n) -> base.GradientTransformation:\n \"\"\"The Adagrad optimizer.\n\n Adagrad is an algorithm for gradient based optimisation that anneals the\n learning rate for each parameter during the course of training.\n\n WARNING: Adagrad's main limit is the monotonic accumulation of squared\n gradients in the denominator: since all terms are >0, the sum keeps growing\n during training and the learning rate eventually becomes vanishingly small.\n\n References:\n Duchi et al, 2011: https://jmlr.org/papers/v12/duchi11a.html\n\n Args:\n learning_rate: this is a fixed global scaling factor.\n 
initial_accumulator_value: initialisation for the accumulator.\n eps: a small constant applied to denominator inside of the square root\n (as in RMSProp) to avoid dividing by zero when rescaling.\n\n Returns:\n the corresponding `GradientTransformation`.\n \"\"\"\n return combine.chain(\n transform.scale_by_rss(\n initial_accumulator_value=initial_accumulator_value, eps=eps),\n _scale_by_learning_rate(learning_rate),\n )\n\n\ndef adam(\n learning_rate: ScalarOrSchedule,\n b1: float = 0.9,\n b2: float = 0.999,\n eps: float = 1e-8,\n eps_root: float = 0.0,\n mu_dtype: Optional[Any] = None,\n) -> base.GradientTransformation:\n \"\"\"The classic Adam optimiser.\n\n Adam is an SGD variant with learning rate adaptation. The `learning_rate`\n used for each weight is computed from estimates of first- and second-order\n moments of the gradients (using suitable exponential moving averages).\n\n References:\n Kingma et al, 2014: https://arxiv.org/abs/1412.6980\n\n Args:\n learning_rate: this is a fixed global scaling factor.\n b1: the exponential decay rate to track the first moment of past gradients.\n b2: the exponential decay rate to track the second moment of past gradients.\n eps: a small constant applied to denominator outside of the square root\n (as in the Adam paper) to avoid dividing by zero when rescaling.\n eps_root: (default `0`), a small constant applied to denominator inside the\n square root (as in RMSProp), to avoid dividing by zero when rescaling.\n This is needed for example when computing (meta-)gradients through Adam.\n mu_dtype: optional `dtype` to be used for the first order accumulator; if\n `None` then the `dtype` is inferred from `params` and `updates`.\n\n Returns:\n the corresponding `GradientTransformation`.\n \"\"\"\n return combine.chain(\n transform.scale_by_adam(\n b1=b1, b2=b2, eps=eps, eps_root=eps_root, mu_dtype=mu_dtype),\n _scale_by_learning_rate(learning_rate),\n )\n\n\ndef adamw(\n learning_rate: ScalarOrSchedule,\n b1: float = 0.9,\n b2: float = 0.999,\n eps: float = 1e-8,\n eps_root: float = 0.0,\n mu_dtype: Optional[Any] = None,\n weight_decay: float = 1e-4,\n mask: Optional[Union[Any, Callable[[base.Params], Any]]] = None,\n) -> base.GradientTransformation:\n \"\"\"Adam with weight decay regularization.\n\n AdamW uses weight decay to regularise learning towards small weights, as\n this leads to better generalisation. In SGD you can also use L2 regularisation\n to implement this as an additive loss term, however L2 regularization\n does not behave as intended for adaptive gradient algorithms such as Adam.\n\n WARNING: Sometimes you may want to skip weight decay for BatchNorm scale or\n for the bias parameters. 
You can use `optax.masked` to make your own AdamW\n variant where `additive_weight_decay` is applied only to a subset of `params`.\n\n References:\n Loshchilov et al, 2019: https://arxiv.org/abs/1711.05101\n\n Args:\n learning_rate: this is a fixed global scaling factor.\n b1: the exponential decay rate to track the first moment of past gradients.\n b2: the exponential decay rate to track the second moment of past gradients.\n eps: a small constant applied to denominator outside of the square root\n (as in the Adam paper) to avoid dividing by zero when rescaling.\n eps_root: (default `0`), a small constant applied to denominator inside the\n square root (as in RMSProp), to avoid dividing by zero when rescaling.\n This is needed for instance when computing (meta-)gradients through Adam.\n mu_dtype: optional `dtype` to be used for the first order accumulator; if\n `None` then the `dtype` is inferred from `params` and `updates`.\n weight_decay: strength of the weight decay regularization. Note that this\n weight decay is multiplied with the learning rate. This is consistent\n with other frameworks such as PyTorch, but different from\n (Loshchilov et al, 2019) where the weight decay is only multiplied with\n the \"schedule multiplier\", but not the base learning rate.\n mask: a tree with same structure as (or a prefix of) the params PyTree,\n or a Callable that returns such a pytree given the params/updates.\n The leaves should be booleans, `True` for leaves/subtrees you want to\n apply the weight decay to, and `False` for those you want to skip. Note\n that the Adam gradient transformations are applied to all parameters.\n\n Returns:\n the corresponding `GradientTransformation`.\n \"\"\"\n return combine.chain(\n transform.scale_by_adam(\n b1=b1, b2=b2, eps=eps, eps_root=eps_root, mu_dtype=mu_dtype),\n transform.add_decayed_weights(weight_decay, mask),\n _scale_by_learning_rate(learning_rate),\n )\n\n\ndef fromage(\n learning_rate: float,\n min_norm: float = 1e-6\n) -> base.GradientTransformation:\n \"\"\"The Frobenius matched gradient descent (Fromage) optimiser.\n\n Fromage is a learning algorithm that does not require learning rate tuning.\n The optimiser is based on modelling neural network gradients via deep relative\n trust (a distance function on deep neural networks). Fromage is similar to the\n LARS optimiser and can work on a range of standard neural network benchmarks,\n such as natural language Transformers and generative adversarial networks.\n\n References:\n Bernstein et al, 2020: https://arxiv.org/abs/2002.03432\n\n Args:\n learning_rate: this is a fixed global scaling factor.\n min_norm: a minimum value that the norm of the gradient updates and the\n norm of the layer parameters can be clipped to to avoid dividing by zero\n when computing the trust ratio (as in the LARS paper).\n\n Returns:\n the corresponding `GradientTransformation`.\n \"\"\"\n mult = 1 / jnp.sqrt(1 + learning_rate ** 2)\n return combine.chain(\n transform.scale_by_trust_ratio(min_norm),\n _scale_by_learning_rate(learning_rate * mult),\n transform.add_decayed_weights((mult - 1)),\n )\n\n\ndef lars(\n learning_rate: ScalarOrSchedule,\n weight_decay: float = 0.,\n weight_decay_mask: MaskOrFn = True,\n trust_coefficient: float = 0.001,\n eps: float = 0.,\n trust_ratio_mask: MaskOrFn = True,\n momentum: float = 0.9,\n nesterov: bool = False,\n) -> base.GradientTransformation:\n \"\"\"The LARS optimiser.\n\n LARS is a layer-wise adaptive optimiser introduced to help scale SGD to\n larger batch sizes. 
LARS later inspired the LAMB optimiser.\n\n References:\n You et al, 2017: https://arxiv.org/abs/1708.03888\n\n Args:\n learning_rate: this is a fixed global scaling factor.\n weight_decay (default `0.`): strength of the weight decay regularization.\n weight_decay_mask: a tree with same structure as (or a prefix of) the params\n PyTree, or a Callable that returns such a pytree given the params/updates.\n The leaves should be booleans, `True` for leaves/subtrees you want to\n apply the transformation to, and `False` for those you want to skip.\n trust_coefficient: a multiplier for the trust ratio.\n eps: optional additive constant in the trust ratio denominator.\n trust_ratio_mask: a tree with same structure as (or a prefix of) the params\n PyTree, or a Callable that returns such a pytree given the params/updates.\n The leaves should be booleans, `True` for leaves/subtrees you want to\n apply the transformation to, and `False` for those you want to skip.\n momentum: the decay rate for momentum.\n nesterov: whether to use Nesterov momentum.\n\n Returns:\n the corresponding `GradientTransformation`.\n \"\"\"\n return combine.chain(\n transform.add_decayed_weights(weight_decay, mask=weight_decay_mask),\n wrappers.masked(\n inner=transform.scale_by_trust_ratio(\n trust_coefficient=trust_coefficient, eps=eps),\n mask=trust_ratio_mask),\n _scale_by_learning_rate(learning_rate),\n transform.trace(decay=momentum, nesterov=nesterov),\n )\n\n\ndef lamb(\n learning_rate: ScalarOrSchedule,\n b1: float = 0.9,\n b2: float = 0.999,\n eps: float = 1e-6,\n eps_root: float = 0.0,\n weight_decay: float = 0.,\n mask: MaskOrFn = None,\n) -> base.GradientTransformation:\n \"\"\"The LAMB optimiser.\n\n LAMB is a general purpose layer-wise adaptive large batch optimiser designed\n to provide consistent training performance across a wide range of tasks,\n including those that use attention-based models (such as Transformers) and\n ResNet-50. 
The optimiser is able to work with small and large batch sizes.\n LAMB was inspired by the LARS learning algorithm.\n\n References:\n You et al, 2019: https://arxiv.org/abs/1904.00962\n\n Args:\n learning_rate: this is a fixed global scaling factor.\n b1: the exponential decay rate to track the first moment of past gradients.\n b2: the exponential decay rate to track the second moment of past gradients.\n eps: a small constant applied to denominator outside of the square root\n (as in the Adam paper) to avoid dividing by zero when rescaling.\n eps_root: (default `0.0`), a small constant applied to denominator inside\n the square root (as in RMSProp), to avoid dividing by zero when rescaling.\n This is needed for instance when computing (meta-)gradients through Adam.\n weight_decay (default `0.`): strength of the weight decay regularization.\n mask: a tree with same structure as (or a prefix of) the params PyTree,\n or a Callable that returns such a pytree given the params/updates.\n The leaves should be booleans, `True` for leaves/subtrees you want to\n apply the transformation to, and `False` for those you want to skip.\n\n Returns:\n the corresponding `GradientTransformation`.\n \"\"\"\n return combine.chain(\n transform.scale_by_adam(b1=b1, b2=b2, eps=eps, eps_root=eps_root),\n transform.add_decayed_weights(weight_decay=weight_decay, mask=mask),\n transform.scale_by_trust_ratio(),\n _scale_by_learning_rate(learning_rate),\n )\n\n\ndef noisy_sgd(\n learning_rate: ScalarOrSchedule,\n eta: float = 0.01,\n gamma: float = 0.55,\n seed: int = 0\n) -> base.GradientTransformation:\n r\"\"\"A variant of SGD with added noise.\n\n It has been found that adding noise to the gradients can improve\n both the training error and the generalisation error in very deep networks.\n\n References:\n Neelakantan et al, 2014: https://arxiv.org/abs/1511.06807\n\n Args:\n learning_rate: this is a fixed global scaling factor.\n eta: the initial variance for the gaussian noise added to gradients.\n gamma: a parameter controlling the annealing of noise over time,\n the variance decays according to `(1+t)^-\\gamma`.\n seed: the seed for the pseudo-random generation process.\n\n Returns:\n the corresponding `GradientTransformation`.\n \"\"\"\n return combine.chain(\n transform.add_noise(eta, gamma, seed),\n _scale_by_learning_rate(learning_rate),\n )\n\n\ndef radam(\n learning_rate: ScalarOrSchedule,\n b1: float = 0.9,\n b2: float = 0.999,\n eps: float = 1e-8,\n eps_root: float = 0.0,\n threshold: float = 5.0\n) -> base.GradientTransformation:\n \"\"\"The Rectified Adam optimiser.\n\n The adaptive learning rate in Adam has undesirably large variance in early\n stages of training, due to the limited number of training samples used to\n estimate the optimiser's statistics. 
Rectified Adam addresses this issue\n by analytically reducing the large variance.\n\n References:\n Kingma et al, 2014: https://arxiv.org/abs/1412.6980\n\n Args:\n learning_rate: this is a fixed global scaling factor.\n b1: the exponential decay rate to track the first moment of past gradients.\n b2: the exponential decay rate to track the second moment of past gradients.\n eps: a small constant applied to denominator outside of the square root\n (as in the Adam paper) to avoid dividing by zero when rescaling.\n eps_root: (default `0`), a small constant applied to denominator inside the\n square root (as in RMSProp), to avoid dividing by zero when rescaling.\n This is needed for instance when computing (meta-)gradients through Adam.\n threshold: the threshold for variance tractability.\n\n Returns:\n the corresponding `GradientTransformation`.\n \"\"\"\n return combine.chain(\n transform.scale_by_radam(\n b1=b1, b2=b2, eps=eps, eps_root=eps_root, threshold=threshold),\n _scale_by_learning_rate(learning_rate),\n )\n\n\ndef rmsprop(\n learning_rate: ScalarOrSchedule,\n decay: float = 0.9,\n eps: float = 1e-8,\n initial_scale: float = 0.,\n centered: bool = False,\n momentum: Optional[float] = None,\n nesterov: bool = False\n) -> base.GradientTransformation:\n # pylint: disable=line-too-long\n \"\"\"A flexible RMSProp optimiser.\n\n RMSProp is an SGD variant with learning rate adaptation. The `learning_rate`\n used for each weight is scaled by a suitable estimate of the magnitude of the\n gradients on previous steps. Several variants of RMSProp can be found\n in the literature. This alias provides an easy to configure RMSProp\n optimiser that can be used to switch between several of these variants.\n\n References:\n Tieleman and Hinton, 2012: http://www.cs.toronto.edu/~hinton/coursera/lecture6/lec6.pdf\n Graves, 2013: https://arxiv.org/abs/1308.0850\n\n Args:\n learning_rate: this is a fixed global scaling factor.\n decay: the decay used to track the magnitude of previous gradients.\n eps: a small numerical constant to avoid dividing by zero when rescaling.\n initial_scale: (default `0.`), initialisation of accumulators tracking the\n magnitude of previous updates. PyTorch uses `0`, TF1 uses `1`. 
When\n reproducing results from a paper, verify the value used by the authors.\n centered: (default `False`), whether the second moment or the variance of\n the past gradients is used to rescale the latest gradients.\n momentum: (default `None`), the `decay` rate used by the momentum term,\n when it is set to `None`, then momentum is not used at all.\n nesterov (default `False`): whether nesterov momentum is used.\n\n Returns:\n the corresponding `GradientTransformation`.\n \"\"\"\n # pylint: enable=line-too-long\n if centered:\n return combine.chain(\n transform.scale_by_stddev(\n decay=decay, eps=eps, initial_scale=initial_scale),\n _scale_by_learning_rate(learning_rate),\n (transform.trace(decay=momentum, nesterov=nesterov)\n if momentum is not None else base.identity())\n )\n return combine.chain(\n transform.scale_by_rms(\n decay=decay, eps=eps, initial_scale=initial_scale),\n _scale_by_learning_rate(learning_rate),\n (transform.trace(decay=momentum, nesterov=nesterov)\n if momentum is not None else base.identity())\n )\n\n\ndef sgd(\n learning_rate: ScalarOrSchedule,\n momentum: Optional[float] = None,\n nesterov: bool = False,\n accumulator_dtype: Optional[Any] = None,\n) -> base.GradientTransformation:\n \"\"\"A canonical Stochastic Gradient Descent optimiser.\n\n This implements stochastic gradient descent. It also includes support for\n momentum, and nesterov acceleration, as these are standard practice when\n using stochastic gradient descent to train deep neural networks.\n\n References:\n Sutskever et al, 2013: http://proceedings.mlr.press/v28/sutskever13.pdf\n\n Args:\n learning_rate: this is a fixed global scaling factor.\n momentum: (default `None`), the `decay` rate used by the momentum term,\n when it is set to `None`, then momentum is not used at all.\n nesterov (default `False`): whether nesterov momentum is used.\n accumulator_dtype: optional `dtype` to be used for the accumulator; if\n `None` then the `dtype` is inferred from `params` and `updates`.\n\n Returns:\n A `GradientTransformation`.\n \"\"\"\n return combine.chain(\n (transform.trace(decay=momentum, nesterov=nesterov,\n accumulator_dtype=accumulator_dtype)\n if momentum is not None else base.identity()),\n _scale_by_learning_rate(learning_rate)\n )\n\n\ndef sm3(\n learning_rate: float,\n momentum: float = 0.9\n) -> base.GradientTransformation:\n \"\"\"The SM3 optimiser.\n\n SM3 (Square-root of Minima of Sums of Maxima of Squared-gradients Method) is a\n memory-efficient adaptive optimiser designed to decrease memory overhead when\n training very large models, such as the Transformer for machine translation,\n BERT for language modelling, and AmoebaNet-D for image classification. 
SM3: 1)\n applies to tensors of arbitrary dimensions and any predefined cover of the\n parameters; 2) adapts the learning rates in an adaptive and data-driven manner\n (like Adagrad and unlike Adafactor); and 3) comes with rigorous convergence\n guarantees in stochastic convex optimization settings.\n\n References:\n Anil et al, 2019: https://arxiv.org/abs/1901.11150\n\n Args:\n learning_rate: this is a fixed global scaling factor.\n momentum: the `decay` rate used by the momentum term (when it is not set to\n `None`, then momentum is not used at all).\n\n Returns:\n the corresponding `GradientTransformation`.\n \"\"\"\n return combine.chain(\n transform.scale_by_sm3(momentum),\n transform.scale(-learning_rate),\n )\n\n\ndef yogi(\n learning_rate: ScalarOrSchedule,\n b1: float = 0.9,\n b2: float = 0.999,\n eps: float = 1e-3,\n) -> base.GradientTransformation:\n \"\"\"The Yogi optimiser.\n\n Yogi is an adaptive optimiser, which provides control in tuning the effective\n learning rate to prevent it from increasing. By doing so, it focuses on\n addressing the issues of convergence and generalisation in exponential moving\n average-based adaptive methods (such as Adam and RMSprop). Yogi is a\n modification of Adam and uses the same parameters.\n\n References:\n Zaheer et al, 2020: http://www.sanjivk.com/yogi_nips2018.pdf\n\n Args:\n learning_rate: this is a fixed global scaling factor.\n b1: the exponential decay rate to track the first moment of past gradients.\n b2: the exponential decay rate to track the second moment of past gradients.\n eps: a small constant applied to denominator outside of the square root\n (as in the Adam paper) to avoid dividing by zero when rescaling.\n\n Returns:\n the corresponding `GradientTransformation`.\n \"\"\"\n return combine.chain(\n transform.scale_by_yogi(b1=b1, b2=b2, eps=eps),\n _scale_by_learning_rate(learning_rate),\n )\n\n\ndef dpsgd(\n learning_rate: ScalarOrSchedule,\n l2_norm_clip: float,\n noise_multiplier: float,\n seed: int,\n momentum: Optional[float] = None,\n nesterov: bool = False\n) -> base.GradientTransformation:\n \"\"\"The DPSGD optimiser.\n\n Differential privacy is a standard for privacy guarantees of algorithms\n learning from aggregate databases including potentially sensitive information.\n DPSGD offers protection against a strong adversary with full knowledge of the\n training mechanism and access to the model’s parameters.\n\n WARNING: This `GradientTransformation` expects input updates to have a batch\n dimension on the 0th axis. 
That is, this function expects per-example\n gradients as input (which are easy to obtain in JAX using `jax.vmap`).\n\n References:\n Abadi et al, 2016: https://arxiv.org/abs/1607.00133\n\n Args:\n learning_rate: this is a fixed global scaling factor.\n l2_norm_clip: maximum L2 norm of the per-example gradients.\n noise_multiplier: ratio of standard deviation to the clipping norm.\n seed: initial seed used for the jax.random.PRNGKey\n momentum: (default `None`), the `decay` rate used by the momentum term,\n when it is set to `None`, then momentum is not used at all.\n nesterov (default `False`): whether nesterov momentum is used.\n\n Returns:\n A `GradientTransformation`.\n \"\"\"\n return combine.chain(\n privacy.differentially_private_aggregate(\n l2_norm_clip=l2_norm_clip,\n noise_multiplier=noise_multiplier,\n seed=seed),\n (transform.trace(decay=momentum, nesterov=nesterov)\n if momentum is not None else base.identity()),\n _scale_by_learning_rate(learning_rate)\n )\n\n\ndef adamax(\n learning_rate: ScalarOrSchedule,\n b1: float = 0.9,\n b2: float = 0.999,\n eps: float = 1e-8,\n) -> base.GradientTransformation:\n \"\"\"A variant of the Adam optimizer that uses the infinity norm.\n\n References:\n Kingma et al, 2014: https://arxiv.org/abs/1412.6980\n\n Args:\n learning_rate: this is a fixed global scaling factor.\n b1: the exponential decay rate to track the first moment of past gradients.\n b2: the exponential decay rate to track the maximum of past gradients.\n eps: a small constant applied to denominator to avoid dividing by zero when\n rescaling.\n\n Returns:\n the corresponding `GradientTransformation`.\n \"\"\"\n return combine.chain(\n transform.scale_by_adamax(b1=b1, b2=b2, eps=eps,),\n _scale_by_learning_rate(learning_rate),\n )\n\n\ndef adamaxw(\n learning_rate: ScalarOrSchedule,\n b1: float = 0.9,\n b2: float = 0.999,\n eps: float = 1e-8,\n weight_decay: float = 1e-4,\n mask: Optional[Union[Any, Callable[[base.Params], Any]]] = None,\n) -> base.GradientTransformation:\n \"\"\"Adamax with weight decay regularization.\n\n AdamaxW uses weight decay to regularise learning towards small weights, as\n this leads to better generalisation. In SGD you can also use L2 regularisation\n to implement this as an additive loss term, however L2 regularization\n does not behave as intended for adaptive gradient algorithms such as Adam.\n\n WARNING: Sometimes you may want to skip weight decay for BatchNorm scale or\n for the bias parameters. You can use `optax.masked` to make your own AdamaxW\n variant where `additive_weight_decay` is applied only to a subset of `params`.\n\n References:\n Loshchilov et al, 2019: https://arxiv.org/abs/1711.05101\n\n Args:\n learning_rate: this is a fixed global scaling factor.\n b1: the exponential decay rate to track the first moment of past gradients.\n b2: the exponential decay rate to track the maximum of past gradients.\n eps: a small constant applied to denominator to avoid dividing by zero when\n rescaling.\n weight_decay: strength of the weight decay regularization. Note that this\n weight decay is multiplied with the learning rate. 
This is consistent\n with other frameworks such as PyTorch, but different from\n (Loshchilov et al, 2019) where the weight decay is only multiplied with\n the \"schedule multiplier\", but not the base learning rate.\n mask: a tree with same structure as (or a prefix of) the params PyTree,\n or a Callable that returns such a pytree given the params/updates.\n The leaves should be booleans, `True` for leaves/subtrees you want to\n apply the weight decay to, and `False` for those you want to skip. Note\n that the Adamax gradient transformations are applied to all parameters.\n\n Returns:\n the corresponding `GradientTransformation`.\n \"\"\"\n return combine.chain(\n transform.scale_by_adamax(b1=b1, b2=b2, eps=eps),\n transform.add_decayed_weights(weight_decay, mask),\n _scale_by_learning_rate(learning_rate),\n )\n",
"path": "optax/_src/alias.py"
}
] | diff --git a/optax/_src/alias.py b/optax/_src/alias.py
index 423c1856f..203621eb9 100644
--- a/optax/_src/alias.py
+++ b/optax/_src/alias.py
@@ -430,8 +430,8 @@ def noisy_sgd(
the corresponding `GradientTransformation`.
"""
return combine.chain(
- _scale_by_learning_rate(learning_rate),
transform.add_noise(eta, gamma, seed),
+ _scale_by_learning_rate(learning_rate),
)
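The reordering matters because `add_noise` perturbs the raw gradients: with noise applied first, the update is `-lr * (g + n)` with `n ~ N(0, eta / (1 + t)**gamma)`, matching the reference paper, whereas with the old order the noise was appended after the learning-rate scaling, so its magnitude did not scale with the learning rate. A minimal sketch of the fixed behaviour (assuming `optax` and `jax` are installed; exact values depend on the PRNG):

```python
import jax.numpy as jnp
import optax

# With the fix, noisy_sgd composes add_noise *before* the learning-rate
# scaling, so the injected noise is scaled by the learning rate along
# with the gradient.
tx = optax.noisy_sgd(learning_rate=0.1, eta=0.01, gamma=0.55, seed=0)

params = {"w": jnp.zeros(3)}
state = tx.init(params)
grads = {"w": jnp.ones(3)}

updates, state = tx.update(grads, state)
# At step 0 each entry is roughly -0.1 * (1 + n), where n is drawn from
# N(0, eta / (1 + 0)**gamma), i.e. variance eta.
print(updates["w"])
```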
|
cloudtools__troposphere-2174 | DeletionPolicy: add RetainExceptOnCreate option
## Ask ##
Request to add the new `RetainExceptOnCreate` option for the `DeletionPolicy` attribute. This would reduce the effort spent on failed stack operations by eliminating the manual deletion of unused resources before a rollback: those resources already exist and cause failures on retry, since they cannot be created again. A usage sketch follows the documentation links below.
## Documentation ##
- [RetainExceptOnCreate announcement](https://aws.amazon.com/about-aws/whats-new/2023/07/aws-cloudformation-deletion-policies-dev-test-cycle/)
- [DeletionPolicy attribute options](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-attribute-deletionpolicy.html)
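
Once the constant is exported from the top level of `troposphere` (alongside `Delete`, `Retain`, and `Snapshot`), usage could look like the sketch below; the `Bucket` resource is just an illustrative choice:

```python
from troposphere import Template, RetainExceptOnCreate
from troposphere.s3 import Bucket

t = Template()
t.add_resource(
    Bucket(
        "DataBucket",
        # Behaves like Retain, except that a resource which never finished
        # provisioning is deleted when the failed create operation rolls back.
        DeletionPolicy=RetainExceptOnCreate,
    )
)
print(t.to_yaml())
```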
| [
{
"content": "# Copyright (c) 2012-2022, Mark Peek <[email protected]>\n# All rights reserved.\n#\n# See LICENSE file for full license.\nfrom __future__ import annotations\n\nimport collections.abc\nimport json\nimport re\nimport sys\nimport types\nfrom typing import (\n TYPE_CHECKING,\n Any,\n Callable,\n ClassVar,\n Dict,\n List,\n NoReturn,\n Optional,\n Set,\n Tuple,\n Type,\n TypeVar,\n Union,\n cast,\n overload,\n)\n\nimport cfn_flip # type: ignore\n\nfrom . import validators\n\nif TYPE_CHECKING:\n from .type_defs.protocols import JSONreprProtocol, ToDictProtocol\n\n # We cannot `from .type_defs.compat import Final` here for now\n # https://github.com/microsoft/pyright/issues/4197\n if sys.version_info < (3, 8):\n from typing_extensions import Final\n else:\n from typing import Final\n\n__version__ = \"4.4.0\"\n\n# constants for DeletionPolicy and UpdateReplacePolicy\nDelete: Final = \"Delete\"\nRetain: Final = \"Retain\"\nSnapshot: Final = \"Snapshot\"\n\n# Pseudo Parameters\nAWS_ACCOUNT_ID: Final = \"AWS::AccountId\"\nAWS_NOTIFICATION_ARNS: Final = \"AWS::NotificationARNs\"\nAWS_NO_VALUE: Final = \"AWS::NoValue\"\nAWS_PARTITION: Final = \"AWS::Partition\"\nAWS_REGION: Final = \"AWS::Region\"\nAWS_STACK_ID: Final = \"AWS::StackId\"\nAWS_STACK_NAME: Final = \"AWS::StackName\"\nAWS_URL_SUFFIX: Final = \"AWS::URLSuffix\"\n\n# Template Limits\nMAX_MAPPINGS: Final[int] = 200\nMAX_OUTPUTS: Final[int] = 200\nMAX_PARAMETERS: Final[int] = 200\nMAX_RESOURCES: Final[int] = 500\nPARAMETER_TITLE_MAX: Final[int] = 255\n\n\nvalid_names = re.compile(r\"^[a-zA-Z0-9]+$\")\n\n\ndef is_aws_object_subclass(cls: Any) -> bool:\n is_aws_object = False\n try:\n is_aws_object = issubclass(cls, BaseAWSObject)\n # prop_type isn't a class\n except TypeError:\n pass\n return is_aws_object\n\n\n@overload\ndef encode_to_dict(\n obj: Union[Dict[str, Any], JSONreprProtocol, ToDictProtocol]\n) -> Dict[str, Any]:\n ...\n\n\n@overload\ndef encode_to_dict(obj: Union[List[Any], Tuple[Any]]) -> List[Dict[str, Any]]:\n ...\n\n\n@overload\ndef encode_to_dict(obj: Optional[str]) -> Optional[str]:\n ...\n\n\ndef encode_to_dict(\n obj: Union[\n Dict[str, Any], List[Any], JSONreprProtocol, ToDictProtocol, Tuple[Any], Any\n ]\n) -> Union[Dict[str, Any], List[Any], Any]:\n if hasattr(obj, \"to_dict\"):\n # Calling encode_to_dict to ensure object is\n # nomalized to a base dictionary all the way down.\n return encode_to_dict(cast(\"ToDictProtocol\", obj).to_dict())\n\n if isinstance(obj, (list, tuple)):\n new_lst: List[Dict[str, Any]] = []\n for o in obj:\n new_lst.append(encode_to_dict(o))\n return new_lst\n\n if isinstance(obj, dict):\n props: Dict[str, Any] = {}\n for name, prop in obj.items():\n props[name] = encode_to_dict(prop)\n return props\n\n # This is useful when dealing with external libs using\n # this format. Specifically awacs.\n if hasattr(obj, \"JSONrepr\"):\n return encode_to_dict(cast(\"JSONreprProtocol\", obj).JSONrepr())\n\n return obj\n\n\ndef depends_on_helper(\n obj: Optional[Union[List[object], object]]\n) -> Union[Optional[str], List[Optional[str]], List[Any], Any]:\n \"\"\"Handles using .title if the given object is a troposphere resource.\n\n If the given object is a troposphere resource, use the `.title` attribute\n of that resource. If it's a string, just use the string. 
This should allow\n more pythonic use of DependsOn.\n \"\"\"\n if isinstance(obj, AWSObject):\n return obj.title\n elif isinstance(obj, list):\n return list(map(depends_on_helper, cast(List[object], obj)))\n return obj\n\n\n__BaseAWSObjectTypeVar = TypeVar(\"__BaseAWSObjectTypeVar\", bound=\"BaseAWSObject\")\n\n\nclass BaseAWSObject:\n attributes: List[str]\n dictname: Optional[str]\n do_validation: bool\n properties: Dict[str, Any]\n propnames: Set[str]\n props: ClassVar[\n Dict[str, Tuple[Union[Tuple[type, ...], type, Callable[[Any], Any]], bool]]\n ] = {}\n resource: Dict[str, Any]\n resource_type: Optional[str]\n template: Optional[Template]\n title: Optional[str]\n\n def __init__(\n self,\n title: Optional[str],\n template: Optional[Template] = None,\n validation: bool = True,\n **kwargs: Any,\n ) -> None:\n self.title = title\n self.template = template\n self.do_validation = validation\n # Cache the keys for validity checks\n self.propnames = set(self.props.keys())\n self.attributes = [\n \"Condition\",\n \"CreationPolicy\",\n \"DeletionPolicy\",\n \"DependsOn\",\n \"Metadata\",\n \"UpdatePolicy\",\n \"UpdateReplacePolicy\",\n ]\n\n # try to validate the title if its there\n if self.title:\n self.validate_title()\n\n # Create the list of properties set on this object by the user\n self.properties = {}\n dictname = getattr(self, \"dictname\", None)\n if dictname:\n self.resource = {\n dictname: self.properties,\n }\n else:\n self.resource = self.properties\n if hasattr(self, \"resource_type\") and self.resource_type is not None:\n self.resource[\"Type\"] = self.resource_type\n self.__initialized = True\n\n # Check for properties defined in the class\n for k, (_, _required) in self.props.items():\n v = getattr(type(self), k, None)\n if v is not None and k not in kwargs:\n self.__setattr__(k, v)\n\n # Now that it is initialized, populate it with the kwargs\n for k, v in kwargs.items():\n self.__setattr__(k, v)\n\n self.add_to_template()\n\n def add_to_template(self) -> None:\n # Bound it to template if we know it\n if self.template is not None:\n self.template.add_resource(self)\n\n def __getattr__(self, name: str) -> Any:\n # If pickle loads this object, then __getattr__ will cause\n # an infinite loop when pickle invokes this object to look for\n # __setstate__ before attributes is \"loaded\" into this object.\n # Therefore, short circuit the rest of this call if attributes\n # is not loaded yet.\n if \"attributes\" not in self.__dict__:\n raise AttributeError(name)\n try:\n if name in self.attributes:\n return self.resource[name]\n else:\n return self.properties.__getitem__(name)\n except KeyError:\n # Fall back to the name attribute in the object rather than\n # in the properties dict. 
This is for non-OpenStack backwards\n # compatibility since OpenStack objects use a \"name\" property.\n if name == \"name\":\n return self.__getattribute__(\"title\")\n raise AttributeError(name)\n\n def __setattr__(self, name: str, value: Any) -> None:\n if (\n name in self.__dict__.keys()\n or \"_BaseAWSObject__initialized\" not in self.__dict__\n ):\n return dict.__setattr__(self, name, value) # type: ignore\n elif name in self.attributes:\n if name == \"DependsOn\":\n self.resource[name] = depends_on_helper(value)\n else:\n self.resource[name] = value\n return None\n elif name in self.propnames:\n # Check the type of the object and compare against what we were\n # expecting.\n expected_type = self.props[name][0]\n\n # If the value is a AWSHelperFn we can't do much validation\n # we'll have to leave that to Amazon. Maybe there's another way\n # to deal with this that we'll come up with eventually\n if isinstance(value, AWSHelperFn):\n return self.properties.__setitem__(name, value)\n\n # If it's a function, call it...\n elif isinstance(expected_type, types.FunctionType):\n try:\n value = expected_type(value)\n except Exception:\n sys.stderr.write(\n \"%s: %s.%s function validator '%s' threw \"\n \"exception:\\n\"\n % (self.__class__, self.title, name, expected_type.__name__)\n )\n raise\n return self.properties.__setitem__(name, value)\n\n # If it's a list of types, check against those types...\n elif isinstance(expected_type, list):\n # If we're expecting a list, then make sure it is a list\n if not isinstance(value, list):\n self._raise_type(name, value, expected_type)\n\n # Special case a list of a single validation function\n if len(expected_type) == 1 and isinstance(\n expected_type[0], types.FunctionType\n ):\n new_value = list(map(expected_type[0], value)) # type: ignore\n return self.properties.__setitem__(name, new_value)\n\n # Iterate over the list and make sure it matches our\n # type checks (as above accept AWSHelperFn because\n # we can't do the validation ourselves)\n for v in cast(List[Any], value):\n if not isinstance(v, tuple(expected_type)) and not isinstance(\n v, AWSHelperFn\n ):\n self._raise_type(name, v, expected_type)\n # Validated so assign it\n return self.properties.__setitem__(name, value)\n\n # Final validity check, compare the type of value against\n # expected_type which should now be either a single type or\n # a tuple of types.\n elif isinstance(value, cast(type, expected_type)):\n return self.properties.__setitem__(name, value)\n else:\n self._raise_type(name, value, expected_type)\n\n type_name = getattr(self, \"resource_type\", self.__class__.__name__)\n\n if type_name == \"AWS::CloudFormation::CustomResource\" or type_name.startswith(\n \"Custom::\"\n ):\n # Add custom resource arguments to the dict without any further\n # validation. 
The properties of a CustomResource is not known.\n return self.properties.__setitem__(name, value)\n\n raise AttributeError(\n \"%s object does not support attribute %s\" % (type_name, name)\n )\n\n def _raise_type(self, name: str, value: Any, expected_type: Any) -> NoReturn:\n raise TypeError(\n \"%s: %s.%s is %s, expected %s\"\n % (self.__class__, self.title, name, type(value), expected_type)\n )\n\n def validate_title(self) -> None:\n if not self.title or not valid_names.match(self.title):\n raise ValueError('Name \"%s\" not alphanumeric' % self.title)\n\n def validate(self) -> None:\n pass\n\n def no_validation(self: __BaseAWSObjectTypeVar) -> __BaseAWSObjectTypeVar:\n self.do_validation = False\n return self\n\n def to_dict(self) -> Dict[str, Any]:\n if self.do_validation:\n self._validate_props()\n self.validate()\n\n if self.properties:\n return encode_to_dict(self.resource)\n elif hasattr(self, \"resource_type\"):\n d: Dict[str, Any] = {}\n for k, v in self.resource.items():\n if k != \"Properties\":\n d[k] = v\n return d\n else:\n return {}\n\n @classmethod\n def _from_dict(\n cls: Type[__BaseAWSObjectTypeVar], title: Optional[str] = None, **kwargs: Any\n ) -> __BaseAWSObjectTypeVar:\n props: Dict[str, Any] = {}\n for prop_name, value in kwargs.items():\n try:\n prop_attrs = cls.props[prop_name]\n except KeyError:\n raise AttributeError(\n \"Object type %s does not have a \"\n \"%s property.\" % (cls.__name__, prop_name)\n )\n prop_type = prop_attrs[0]\n value = kwargs[prop_name]\n is_aws_object = is_aws_object_subclass(prop_type)\n if is_aws_object:\n if not isinstance(value, collections.abc.Mapping):\n raise ValueError(\n \"Property definition for %s must be \"\n \"a Mapping type\" % prop_name\n )\n value = cast(BaseAWSObject, prop_type)._from_dict(**value)\n\n if isinstance(prop_type, list):\n if not isinstance(value, list):\n raise TypeError(\"Attribute %s must be a \" \"list.\" % prop_name)\n new_value: List[Any] = []\n for v in cast(List[Any], value):\n new_v = v\n if is_aws_object_subclass(prop_type[0]):\n if not isinstance(v, collections.abc.Mapping):\n raise ValueError(\n \"Property definition for %s must be \"\n \"a list of Mapping types\" % prop_name\n )\n new_v = cast(BaseAWSObject, prop_type[0])._from_dict(**v)\n new_value.append(new_v)\n value = new_value\n props[prop_name] = value\n if title:\n return cls(title, **props)\n return cls(**props)\n\n @classmethod\n def from_dict(\n cls: Type[__BaseAWSObjectTypeVar], title: str, d: Dict[str, Any]\n ) -> __BaseAWSObjectTypeVar:\n return cls._from_dict(title, **d)\n\n def _validate_props(self) -> None:\n for k, (_, required) in self.props.items():\n if required and k not in self.properties:\n rtype = getattr(self, \"resource_type\", type(self))\n title = getattr(self, \"title\")\n msg = \"Resource %s required in type %s\" % (k, rtype)\n if title:\n msg += \" (title: %s)\" % title\n raise ValueError(msg)\n\n\nclass AWSObject(BaseAWSObject):\n dictname = \"Properties\"\n\n def ref(self) -> Ref:\n return Ref(self)\n\n Ref = ref\n\n def get_att(self, value: str) -> GetAtt:\n return GetAtt(self, value)\n\n GetAtt = get_att\n\n\nclass AWSDeclaration(BaseAWSObject):\n \"\"\"\n Used for CloudFormation Resource Property objects\n http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/\n aws-product-property-reference.html\n \"\"\"\n\n def __init__(self, title: str, **kwargs: Any) -> None:\n super().__init__(title, **kwargs)\n\n def ref(self) -> Ref:\n return Ref(self)\n\n Ref = ref\n\n\nclass AWSProperty(BaseAWSObject):\n 
\"\"\"\n Used for CloudFormation Resource Property objects\n http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/\n aws-product-property-reference.html\n \"\"\"\n\n dictname = None\n\n def __init__(self, title: Optional[str] = None, **kwargs: Any) -> None:\n super().__init__(title, **kwargs)\n\n\nclass AWSAttribute(BaseAWSObject):\n \"\"\"\n Used for CloudFormation Resource Attribute objects\n http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/\n aws-product-attribute-reference.html\n \"\"\"\n\n dictname = None\n\n def __init__(self, title: Optional[str] = None, **kwargs: Any) -> None:\n super().__init__(title, **kwargs)\n\n\ndef validate_delimiter(delimiter: object) -> None:\n if not isinstance(delimiter, str):\n raise ValueError(\"Delimiter must be a String, %s provided\" % type(delimiter))\n\n\ndef validate_pausetime(pausetime: str) -> str:\n if not pausetime.startswith(\"PT\"):\n raise ValueError(\"PauseTime should look like PT#H#M#S\")\n return pausetime\n\n\nclass AWSHelperFn:\n data: Any\n\n def getdata(self, data: object) -> Any:\n if isinstance(data, BaseAWSObject):\n return data.title\n else:\n return data\n\n def to_dict(self) -> Any:\n return encode_to_dict(self.data)\n\n\nclass GenericHelperFn(AWSHelperFn):\n \"\"\"Used as a fallback for the template generator\"\"\"\n\n def __init__(self, data: Any):\n self.data = self.getdata(data)\n\n def to_dict(self) -> Any:\n return encode_to_dict(self.data)\n\n\nclass Base64(AWSHelperFn):\n def __init__(self, data: Any) -> None:\n self.data = {\"Fn::Base64\": data}\n\n\nclass FindInMap(AWSHelperFn):\n def __init__(\n self, mapname: object, toplevelkey: object, secondlevelkey: object\n ) -> None:\n self.data = {\n \"Fn::FindInMap\": [self.getdata(mapname), toplevelkey, secondlevelkey]\n }\n\n\nclass GetAtt(AWSHelperFn):\n def __init__(self, logicalName: object, attrName: object) -> None: # noqa: N803\n self.data = {\"Fn::GetAtt\": [self.getdata(logicalName), attrName]}\n\n\nclass Cidr(AWSHelperFn):\n def __init__(\n self, ipblock: object, count: object, sizemask: Optional[object] = None\n ) -> None:\n if sizemask:\n self.data = {\"Fn::Cidr\": [ipblock, count, sizemask]}\n else:\n self.data = {\"Fn::Cidr\": [ipblock, count]}\n\n\nclass GetAZs(AWSHelperFn):\n def __init__(self, region: object = \"\") -> None:\n self.data = {\"Fn::GetAZs\": region}\n\n\nclass If(AWSHelperFn):\n def __init__(self, cond: object, true: object, false: object) -> None:\n self.data = {\"Fn::If\": [self.getdata(cond), true, false]}\n\n\nclass Equals(AWSHelperFn):\n def __init__(self, value_one: object, value_two: object) -> None:\n self.data = {\"Fn::Equals\": [value_one, value_two]}\n\n\nclass And(AWSHelperFn):\n def __init__(self, cond_one: object, cond_two: object, *conds: object) -> None:\n self.data = {\"Fn::And\": [cond_one, cond_two] + list(conds)}\n\n\nclass Or(AWSHelperFn):\n def __init__(self, cond_one: object, cond_two: object, *conds: object) -> None:\n self.data = {\"Fn::Or\": [cond_one, cond_two] + list(conds)}\n\n\nclass Not(AWSHelperFn):\n def __init__(self, cond: object) -> None:\n self.data = {\"Fn::Not\": [self.getdata(cond)]}\n\n\nclass Join(AWSHelperFn):\n def __init__(self, delimiter: object, values: object) -> None:\n validate_delimiter(delimiter)\n self.data = {\"Fn::Join\": [delimiter, values]}\n\n\nclass Split(AWSHelperFn):\n def __init__(self, delimiter: object, values: object) -> None:\n validate_delimiter(delimiter)\n self.data = {\"Fn::Split\": [delimiter, values]}\n\n\nclass Sub(AWSHelperFn):\n def __init__(\n 
self,\n input_str: object,\n dict_values: Optional[Dict[str, Any]] = None,\n **values: Any,\n ) -> None:\n # merge dict\n if dict_values:\n values.update(dict_values)\n self.data = {\"Fn::Sub\": [input_str, values] if values else input_str}\n\n\nclass Name(AWSHelperFn):\n def __init__(self, data: object) -> None:\n self.data = self.getdata(data)\n\n\nclass Select(AWSHelperFn):\n def __init__(self, indx: object, objects: object) -> None:\n self.data = {\"Fn::Select\": [indx, objects]}\n\n\nclass Ref(AWSHelperFn):\n def __init__(self, data: object) -> None:\n self.data = {\"Ref\": self.getdata(data)}\n\n def __eq__(self, other: Any) -> bool:\n if isinstance(other, self.__class__):\n return self.data == other.data\n return list(self.data.values())[0] == other\n\n def __hash__(self) -> int:\n return hash(list(self.data.values())[0])\n\n\n# The type of the props dict\nPropsDictType = Dict[\n str,\n Tuple[\n Union[\n str,\n AWSProperty,\n AWSHelperFn,\n Callable[[Any], Any],\n Dict[str, Any],\n List[Any],\n Tuple[type, ...],\n ],\n bool,\n ],\n]\n\n# Pseudo Parameter Ref's\nAccountId = Ref(AWS_ACCOUNT_ID)\nNotificationARNs = Ref(AWS_NOTIFICATION_ARNS)\nNoValue = Ref(AWS_NO_VALUE)\nPartition = Ref(AWS_PARTITION)\nRegion = Ref(AWS_REGION)\nStackId = Ref(AWS_STACK_ID)\nStackName = Ref(AWS_STACK_NAME)\nURLSuffix = Ref(AWS_URL_SUFFIX)\n\n\nclass Condition(AWSHelperFn):\n def __init__(self, data: object) -> None:\n self.data = {\"Condition\": self.getdata(data)}\n\n\nclass ImportValue(AWSHelperFn):\n def __init__(self, data: object) -> None:\n self.data = {\"Fn::ImportValue\": data}\n\n\nclass Tag(AWSHelperFn):\n def __init__(self, k: object, v: object) -> None:\n self.data = {\n \"Key\": k,\n \"Value\": v,\n }\n\n\nclass Tags(AWSHelperFn):\n tags: List[Union[AWSHelperFn, Dict[Any, Any]]]\n\n def __init__(self, *args: object, **kwargs: Any):\n self.tags = []\n tag_dict: Dict[Any, Any]\n if not args:\n # Assume kwargs variant\n tag_dict = kwargs\n else:\n tag_dict = {}\n for arg in args:\n # Validate argument passed in is an AWSHelperFn or...\n if isinstance(arg, AWSHelperFn):\n self.tags.append(arg)\n # Validate argument passed in is a dict\n elif isinstance(arg, dict):\n tag_dict.update(cast(Dict[str, Any], arg))\n else:\n raise TypeError(\n \"Tags needs to be either kwargs, dict, or AWSHelperFn\"\n )\n\n def add_tag(\n tag_list: List[Union[AWSHelperFn, Dict[Any, Any]]], k: object, v: object\n ):\n tag_list.append(\n {\n \"Key\": k,\n \"Value\": v,\n }\n )\n\n # Detect and handle non-string Tag items which do not sort in Python3\n if all(isinstance(k, str) for k in tag_dict):\n for k, v in sorted(tag_dict.items()):\n add_tag(self.tags, k, v)\n else:\n for k, v in tag_dict.items():\n add_tag(self.tags, k, v)\n\n # allow concatenation of the Tags object via '+' operator\n def __add__(self, newtags: Tags) -> Tags:\n newtags.tags = self.tags + newtags.tags\n return newtags\n\n def to_dict(self) -> List[Any]:\n return [encode_to_dict(tag) for tag in self.tags]\n\n @classmethod\n def from_dict(cls, title: Optional[str] = None, **kwargs: Any):\n return cls(**kwargs)\n\n\n__OutputTypeVar = TypeVar(\"__OutputTypeVar\", \"Output\", List[\"Output\"])\n__ParameterTypeVar = TypeVar(\"__ParameterTypeVar\", \"Parameter\", List[\"Parameter\"])\n__ResourceTypeVar = TypeVar(\n \"__ResourceTypeVar\", bound=Union[BaseAWSObject, List[BaseAWSObject]]\n)\n__UpdateTypeVar = TypeVar(\n \"__UpdateTypeVar\",\n bound=Union[BaseAWSObject, List[BaseAWSObject], List[\"Output\"], List[\"Parameter\"]],\n)\n\n\nclass Template:\n from 
troposphere.serverless import Globals\n\n conditions: Dict[str, Union[AWSHelperFn, Condition]]\n description: Optional[str]\n globals: Optional[Globals]\n mappings: Dict[str, Dict[str, Any]]\n metadata: Dict[str, Any]\n outputs: Dict[str, Output]\n parameters: Dict[str, Parameter]\n props: Dict[str, Tuple[type, bool]] = {\n \"AWSTemplateFormatVersion\": (str, False),\n \"Transform\": (str, False),\n \"Description\": (str, False),\n \"Parameters\": (dict, False),\n \"Mappings\": (dict, False),\n \"Resources\": (dict, False),\n \"Globals\": (Globals, False),\n \"Outputs\": (dict, False),\n \"Rules\": (dict, False),\n }\n resources: Dict[str, AWSObject]\n rules: Dict[str, Any]\n transform: Optional[Union[List[object], str]]\n version: Optional[str]\n\n def __init__(\n self,\n Description: Optional[str] = None,\n Metadata: Optional[Dict[str, Any]] = None,\n ): # noqa: N803\n self.description = Description\n self.metadata = {} if Metadata is None else Metadata\n self.conditions = {}\n self.mappings = {}\n self.outputs = {}\n self.parameters = {}\n self.resources = {}\n self.rules = {}\n self.globals = None\n self.version = None\n self.transform = None\n\n def set_description(self, description: str) -> None:\n self.description = description\n\n def set_metadata(self, metadata: Dict[str, Any]) -> None:\n self.metadata = metadata\n\n def add_condition(self, name: str, condition: AWSHelperFn) -> str:\n self.conditions[name] = condition\n return name\n\n def handle_duplicate_key(self, key: Optional[str]) -> NoReturn:\n raise ValueError('duplicate key \"%s\" detected' % key)\n\n def _update(self, d: Dict[Any, Any], values: __UpdateTypeVar) -> __UpdateTypeVar:\n if isinstance(values, list):\n for v in values:\n if v.title in d:\n self.handle_duplicate_key(v.title)\n d[v.title] = v\n else:\n if values.title in d:\n self.handle_duplicate_key(values.title)\n d[values.title] = values\n return values\n\n def add_output(self, output: __OutputTypeVar) -> __OutputTypeVar:\n if len(self.outputs) >= MAX_OUTPUTS:\n raise ValueError(\"Maximum outputs %d reached\" % MAX_OUTPUTS)\n return self._update(self.outputs, output)\n\n def add_mapping(self, name: str, mapping: Dict[str, Any]) -> None:\n if len(self.mappings) >= MAX_MAPPINGS:\n raise ValueError(\"Maximum mappings %d reached\" % MAX_MAPPINGS)\n if name not in self.mappings:\n self.mappings[name] = {}\n self.mappings[name].update(mapping)\n\n def add_parameter(self, parameter: __ParameterTypeVar) -> __ParameterTypeVar:\n if len(self.parameters) >= MAX_PARAMETERS:\n raise ValueError(\"Maximum parameters %d reached\" % MAX_PARAMETERS)\n return self._update(self.parameters, parameter)\n\n def get_or_add_parameter(self, parameter: Parameter) -> Parameter:\n if parameter.title in self.parameters:\n return self.parameters[parameter.title]\n else:\n self.add_parameter(parameter)\n return parameter\n\n def add_resource(self, resource: __ResourceTypeVar) -> __ResourceTypeVar:\n if len(self.resources) >= MAX_RESOURCES:\n raise ValueError(\"Maximum number of resources %d reached\" % MAX_RESOURCES)\n return self._update(self.resources, resource)\n\n def add_rule(self, name: str, rule: object) -> None:\n \"\"\"\n Add a Rule to the template to enforce extra constraints on the\n parameters. 
As of June 2019 rules are undocumented in CloudFormation\n but have the same syntax and behaviour as in ServiceCatalog:\n https://docs.aws.amazon.com/servicecatalog/latest/adminguide/reference-template_constraint_rules.html\n\n :param rule: a dict with 'Assertions' (mandatory) and 'RuleCondition'\n (optional) keys\n \"\"\"\n # TODO: check maximum number of Rules, and enforce limit.\n if name in self.rules:\n self.handle_duplicate_key(name)\n self.rules[name] = rule\n\n def set_version(self, version: Optional[str] = None) -> None:\n if version:\n self.version = version\n else:\n self.version = \"2010-09-09\"\n\n def set_transform(self, transform: Union[List[object], str]) -> None:\n from troposphere.serverless import SERVERLESS_TRANSFORM\n\n if self.globals and transform != SERVERLESS_TRANSFORM:\n raise ValueError(\n \"Cannot set transform to non-Serverless while using Globals\"\n )\n self.transform = transform\n\n def set_globals(self, globals: Globals) -> None:\n from troposphere.serverless import SERVERLESS_TRANSFORM\n\n if self.transform != SERVERLESS_TRANSFORM:\n raise ValueError(\n f\"Cannot set Globals for non-Serverless template (set transform to '{SERVERLESS_TRANSFORM}' first)\"\n )\n self.globals = globals\n\n def to_dict(self) -> Dict[str, Any]:\n t = {}\n if self.description:\n t[\"Description\"] = self.description\n if self.metadata:\n t[\"Metadata\"] = self.metadata\n if self.conditions:\n t[\"Conditions\"] = self.conditions\n if self.mappings:\n t[\"Mappings\"] = self.mappings\n if self.outputs:\n t[\"Outputs\"] = self.outputs\n if self.parameters:\n t[\"Parameters\"] = self.parameters\n if self.version:\n t[\"AWSTemplateFormatVersion\"] = self.version\n if self.transform:\n t[\"Transform\"] = self.transform\n if self.rules:\n t[\"Rules\"] = self.rules\n if self.globals:\n t[\"Globals\"] = self.globals\n t[\"Resources\"] = self.resources\n\n return encode_to_dict(t)\n\n def set_parameter_label(self, parameter: Union[Parameter, str], label: str) -> None:\n \"\"\"\n Sets the Label used in the User Interface for the given parameter.\n :type parameter: str or Parameter\n :type label: str\n \"\"\"\n labels = self.metadata.setdefault(\n \"AWS::CloudFormation::Interface\", {}\n ).setdefault(\"ParameterLabels\", {})\n\n if isinstance(parameter, BaseAWSObject):\n parameter = parameter.title\n\n labels[parameter] = {\"default\": label}\n\n def add_parameter_to_group(\n self, parameter: Union[Parameter, str], group_name: str\n ) -> str:\n \"\"\"\n Add a parameter under a group (created if needed).\n :type parameter: str or Parameter\n :type group_name: str\n \"\"\"\n groups = self.metadata.setdefault(\n \"AWS::CloudFormation::Interface\", {}\n ).setdefault(\"ParameterGroups\", [])\n\n if isinstance(parameter, BaseAWSObject):\n parameter = parameter.title\n\n # Check if group_name already exists\n existing_group: Optional[Dict[str, Any]] = None\n for group in groups:\n if group[\"Label\"][\"default\"] == group_name:\n existing_group = group\n break\n\n if existing_group is None:\n existing_group = {\n \"Label\": {\"default\": group_name},\n \"Parameters\": [],\n }\n groups.append(existing_group)\n\n existing_group[\"Parameters\"].append(parameter)\n\n return group_name\n\n def to_json(\n self,\n indent: int = 1,\n sort_keys: bool = True,\n separators: Tuple[str, str] = (\",\", \": \"),\n ) -> str:\n return json.dumps(\n self.to_dict(), indent=indent, sort_keys=sort_keys, separators=separators\n )\n\n def to_yaml(\n self, clean_up: bool = False, long_form: bool = False, sort_keys: bool = 
True\n ) -> str:\n return cfn_flip.to_yaml( # type: ignore\n self.to_json(sort_keys=sort_keys), clean_up=clean_up, long_form=long_form\n )\n\n def __eq__(self, other: object) -> bool:\n if isinstance(other, Template):\n return self.to_json() == other.to_json()\n else:\n return False\n\n def __ne__(self, other: object) -> bool:\n return not self.__eq__(other)\n\n def __hash__(self) -> int:\n return hash(self.to_json())\n\n\nclass Export(AWSHelperFn):\n def __init__(self, name: Union[str, AWSHelperFn]) -> None:\n self.data = {\n \"Name\": name,\n }\n\n\nclass Output(AWSDeclaration):\n props = {\n \"Description\": (str, False),\n \"Export\": (Export, False),\n \"Value\": (str, True),\n }\n\n def add_to_template(self) -> None:\n # Bound it to template if we know it\n if self.template is not None:\n self.template.add_output(self)\n\n\nclass Parameter(AWSDeclaration):\n STRING_PROPERTIES = [\"AllowedPattern\", \"MaxLength\", \"MinLength\"]\n NUMBER_PROPERTIES = [\"MaxValue\", \"MinValue\"]\n props = {\n \"Type\": (str, True),\n \"Default\": ((str, int, float), False),\n \"NoEcho\": (bool, False),\n \"AllowedValues\": (list, False),\n \"AllowedPattern\": (str, False),\n \"MaxLength\": (validators.positive_integer, False),\n \"MinLength\": (validators.positive_integer, False),\n \"MaxValue\": (validators.integer, False),\n \"MinValue\": (validators.integer, False),\n \"Description\": (str, False),\n \"ConstraintDescription\": (str, False),\n }\n title: str\n\n def add_to_template(self) -> None:\n # Bound it to template if we know it\n if self.template is not None:\n self.template.add_parameter(self)\n\n def validate_title(self) -> None:\n if len(self.title) > PARAMETER_TITLE_MAX:\n raise ValueError(\n \"Parameter title can be no longer than \"\n \"%d characters\" % PARAMETER_TITLE_MAX\n )\n super().validate_title()\n\n def validate(self) -> None:\n def check_type(t: type, v: Any) -> bool:\n try:\n t(v)\n return True\n except ValueError:\n return False\n\n # Validate the Default parameter value\n default = self.properties.get(\"Default\")\n if default:\n error_str = (\n \"Parameter default type mismatch: expecting \"\n \"type %s got %s with value %r\"\n )\n # Get the Type specified and see whether the default type\n # matches (in the case of a String Type) or can be coerced\n # into one of the number formats.\n param_type = self.properties.get(\"Type\")\n if param_type == \"String\" and not isinstance(default, str):\n raise ValueError(error_str % (\"String\", type(default), default))\n elif param_type == \"Number\":\n allowed = [float, int]\n # See if the default value can be coerced into one\n # of the correct types\n if not any(check_type(x, default) for x in allowed):\n raise ValueError(error_str % (param_type, type(default), default))\n elif param_type == \"List<Number>\":\n if not isinstance(default, str):\n raise ValueError(error_str % (param_type, type(default), default))\n allowed = [float, int]\n dlist = default.split(\",\")\n for d in dlist:\n # Verify the split array are all numbers\n if not any(check_type(x, d) for x in allowed):\n raise ValueError(error_str % (param_type, type(d), dlist))\n\n if self.properties[\"Type\"] != \"String\":\n for p in self.STRING_PROPERTIES:\n if p in self.properties:\n raise ValueError(\n \"%s can only be used with parameters of \" \"the String type.\" % p\n )\n if self.properties[\"Type\"] != \"Number\":\n for p in self.NUMBER_PROPERTIES:\n if p in self.properties:\n raise ValueError(\n \"%s can only be used with parameters of \" \"the Number type.\" % 
p\n )\n",
"path": "troposphere/__init__.py"
}
] | [
{
"content": "# Copyright (c) 2012-2022, Mark Peek <[email protected]>\n# All rights reserved.\n#\n# See LICENSE file for full license.\nfrom __future__ import annotations\n\nimport collections.abc\nimport json\nimport re\nimport sys\nimport types\nfrom typing import (\n TYPE_CHECKING,\n Any,\n Callable,\n ClassVar,\n Dict,\n List,\n NoReturn,\n Optional,\n Set,\n Tuple,\n Type,\n TypeVar,\n Union,\n cast,\n overload,\n)\n\nimport cfn_flip # type: ignore\n\nfrom . import validators\n\nif TYPE_CHECKING:\n from .type_defs.protocols import JSONreprProtocol, ToDictProtocol\n\n # We cannot `from .type_defs.compat import Final` here for now\n # https://github.com/microsoft/pyright/issues/4197\n if sys.version_info < (3, 8):\n from typing_extensions import Final\n else:\n from typing import Final\n\n__version__ = \"4.4.0\"\n\n# constants for DeletionPolicy and UpdateReplacePolicy\nDelete: Final = \"Delete\"\nRetain: Final = \"Retain\"\nRetainExceptOnCreate: Final = \"RetainExceptOnCreate\"\nSnapshot: Final = \"Snapshot\"\n\n# Pseudo Parameters\nAWS_ACCOUNT_ID: Final = \"AWS::AccountId\"\nAWS_NOTIFICATION_ARNS: Final = \"AWS::NotificationARNs\"\nAWS_NO_VALUE: Final = \"AWS::NoValue\"\nAWS_PARTITION: Final = \"AWS::Partition\"\nAWS_REGION: Final = \"AWS::Region\"\nAWS_STACK_ID: Final = \"AWS::StackId\"\nAWS_STACK_NAME: Final = \"AWS::StackName\"\nAWS_URL_SUFFIX: Final = \"AWS::URLSuffix\"\n\n# Template Limits\nMAX_MAPPINGS: Final[int] = 200\nMAX_OUTPUTS: Final[int] = 200\nMAX_PARAMETERS: Final[int] = 200\nMAX_RESOURCES: Final[int] = 500\nPARAMETER_TITLE_MAX: Final[int] = 255\n\n\nvalid_names = re.compile(r\"^[a-zA-Z0-9]+$\")\n\n\ndef is_aws_object_subclass(cls: Any) -> bool:\n is_aws_object = False\n try:\n is_aws_object = issubclass(cls, BaseAWSObject)\n # prop_type isn't a class\n except TypeError:\n pass\n return is_aws_object\n\n\n@overload\ndef encode_to_dict(\n obj: Union[Dict[str, Any], JSONreprProtocol, ToDictProtocol]\n) -> Dict[str, Any]:\n ...\n\n\n@overload\ndef encode_to_dict(obj: Union[List[Any], Tuple[Any]]) -> List[Dict[str, Any]]:\n ...\n\n\n@overload\ndef encode_to_dict(obj: Optional[str]) -> Optional[str]:\n ...\n\n\ndef encode_to_dict(\n obj: Union[\n Dict[str, Any], List[Any], JSONreprProtocol, ToDictProtocol, Tuple[Any], Any\n ]\n) -> Union[Dict[str, Any], List[Any], Any]:\n if hasattr(obj, \"to_dict\"):\n # Calling encode_to_dict to ensure object is\n # nomalized to a base dictionary all the way down.\n return encode_to_dict(cast(\"ToDictProtocol\", obj).to_dict())\n\n if isinstance(obj, (list, tuple)):\n new_lst: List[Dict[str, Any]] = []\n for o in obj:\n new_lst.append(encode_to_dict(o))\n return new_lst\n\n if isinstance(obj, dict):\n props: Dict[str, Any] = {}\n for name, prop in obj.items():\n props[name] = encode_to_dict(prop)\n return props\n\n # This is useful when dealing with external libs using\n # this format. Specifically awacs.\n if hasattr(obj, \"JSONrepr\"):\n return encode_to_dict(cast(\"JSONreprProtocol\", obj).JSONrepr())\n\n return obj\n\n\ndef depends_on_helper(\n obj: Optional[Union[List[object], object]]\n) -> Union[Optional[str], List[Optional[str]], List[Any], Any]:\n \"\"\"Handles using .title if the given object is a troposphere resource.\n\n If the given object is a troposphere resource, use the `.title` attribute\n of that resource. If it's a string, just use the string. 
This should allow\n more pythonic use of DependsOn.\n \"\"\"\n if isinstance(obj, AWSObject):\n return obj.title\n elif isinstance(obj, list):\n return list(map(depends_on_helper, cast(List[object], obj)))\n return obj\n\n\n__BaseAWSObjectTypeVar = TypeVar(\"__BaseAWSObjectTypeVar\", bound=\"BaseAWSObject\")\n\n\nclass BaseAWSObject:\n attributes: List[str]\n dictname: Optional[str]\n do_validation: bool\n properties: Dict[str, Any]\n propnames: Set[str]\n props: ClassVar[\n Dict[str, Tuple[Union[Tuple[type, ...], type, Callable[[Any], Any]], bool]]\n ] = {}\n resource: Dict[str, Any]\n resource_type: Optional[str]\n template: Optional[Template]\n title: Optional[str]\n\n def __init__(\n self,\n title: Optional[str],\n template: Optional[Template] = None,\n validation: bool = True,\n **kwargs: Any,\n ) -> None:\n self.title = title\n self.template = template\n self.do_validation = validation\n # Cache the keys for validity checks\n self.propnames = set(self.props.keys())\n self.attributes = [\n \"Condition\",\n \"CreationPolicy\",\n \"DeletionPolicy\",\n \"DependsOn\",\n \"Metadata\",\n \"UpdatePolicy\",\n \"UpdateReplacePolicy\",\n ]\n\n # try to validate the title if its there\n if self.title:\n self.validate_title()\n\n # Create the list of properties set on this object by the user\n self.properties = {}\n dictname = getattr(self, \"dictname\", None)\n if dictname:\n self.resource = {\n dictname: self.properties,\n }\n else:\n self.resource = self.properties\n if hasattr(self, \"resource_type\") and self.resource_type is not None:\n self.resource[\"Type\"] = self.resource_type\n self.__initialized = True\n\n # Check for properties defined in the class\n for k, (_, _required) in self.props.items():\n v = getattr(type(self), k, None)\n if v is not None and k not in kwargs:\n self.__setattr__(k, v)\n\n # Now that it is initialized, populate it with the kwargs\n for k, v in kwargs.items():\n self.__setattr__(k, v)\n\n self.add_to_template()\n\n def add_to_template(self) -> None:\n # Bound it to template if we know it\n if self.template is not None:\n self.template.add_resource(self)\n\n def __getattr__(self, name: str) -> Any:\n # If pickle loads this object, then __getattr__ will cause\n # an infinite loop when pickle invokes this object to look for\n # __setstate__ before attributes is \"loaded\" into this object.\n # Therefore, short circuit the rest of this call if attributes\n # is not loaded yet.\n if \"attributes\" not in self.__dict__:\n raise AttributeError(name)\n try:\n if name in self.attributes:\n return self.resource[name]\n else:\n return self.properties.__getitem__(name)\n except KeyError:\n # Fall back to the name attribute in the object rather than\n # in the properties dict. 
This is for non-OpenStack backwards\n # compatibility since OpenStack objects use a \"name\" property.\n if name == \"name\":\n return self.__getattribute__(\"title\")\n raise AttributeError(name)\n\n def __setattr__(self, name: str, value: Any) -> None:\n if (\n name in self.__dict__.keys()\n or \"_BaseAWSObject__initialized\" not in self.__dict__\n ):\n return dict.__setattr__(self, name, value) # type: ignore\n elif name in self.attributes:\n if name == \"DependsOn\":\n self.resource[name] = depends_on_helper(value)\n else:\n self.resource[name] = value\n return None\n elif name in self.propnames:\n # Check the type of the object and compare against what we were\n # expecting.\n expected_type = self.props[name][0]\n\n # If the value is a AWSHelperFn we can't do much validation\n # we'll have to leave that to Amazon. Maybe there's another way\n # to deal with this that we'll come up with eventually\n if isinstance(value, AWSHelperFn):\n return self.properties.__setitem__(name, value)\n\n # If it's a function, call it...\n elif isinstance(expected_type, types.FunctionType):\n try:\n value = expected_type(value)\n except Exception:\n sys.stderr.write(\n \"%s: %s.%s function validator '%s' threw \"\n \"exception:\\n\"\n % (self.__class__, self.title, name, expected_type.__name__)\n )\n raise\n return self.properties.__setitem__(name, value)\n\n # If it's a list of types, check against those types...\n elif isinstance(expected_type, list):\n # If we're expecting a list, then make sure it is a list\n if not isinstance(value, list):\n self._raise_type(name, value, expected_type)\n\n # Special case a list of a single validation function\n if len(expected_type) == 1 and isinstance(\n expected_type[0], types.FunctionType\n ):\n new_value = list(map(expected_type[0], value)) # type: ignore\n return self.properties.__setitem__(name, new_value)\n\n # Iterate over the list and make sure it matches our\n # type checks (as above accept AWSHelperFn because\n # we can't do the validation ourselves)\n for v in cast(List[Any], value):\n if not isinstance(v, tuple(expected_type)) and not isinstance(\n v, AWSHelperFn\n ):\n self._raise_type(name, v, expected_type)\n # Validated so assign it\n return self.properties.__setitem__(name, value)\n\n # Final validity check, compare the type of value against\n # expected_type which should now be either a single type or\n # a tuple of types.\n elif isinstance(value, cast(type, expected_type)):\n return self.properties.__setitem__(name, value)\n else:\n self._raise_type(name, value, expected_type)\n\n type_name = getattr(self, \"resource_type\", self.__class__.__name__)\n\n if type_name == \"AWS::CloudFormation::CustomResource\" or type_name.startswith(\n \"Custom::\"\n ):\n # Add custom resource arguments to the dict without any further\n # validation. 
The properties of a CustomResource is not known.\n return self.properties.__setitem__(name, value)\n\n raise AttributeError(\n \"%s object does not support attribute %s\" % (type_name, name)\n )\n\n def _raise_type(self, name: str, value: Any, expected_type: Any) -> NoReturn:\n raise TypeError(\n \"%s: %s.%s is %s, expected %s\"\n % (self.__class__, self.title, name, type(value), expected_type)\n )\n\n def validate_title(self) -> None:\n if not self.title or not valid_names.match(self.title):\n raise ValueError('Name \"%s\" not alphanumeric' % self.title)\n\n def validate(self) -> None:\n pass\n\n def no_validation(self: __BaseAWSObjectTypeVar) -> __BaseAWSObjectTypeVar:\n self.do_validation = False\n return self\n\n def to_dict(self) -> Dict[str, Any]:\n if self.do_validation:\n self._validate_props()\n self.validate()\n\n if self.properties:\n return encode_to_dict(self.resource)\n elif hasattr(self, \"resource_type\"):\n d: Dict[str, Any] = {}\n for k, v in self.resource.items():\n if k != \"Properties\":\n d[k] = v\n return d\n else:\n return {}\n\n @classmethod\n def _from_dict(\n cls: Type[__BaseAWSObjectTypeVar], title: Optional[str] = None, **kwargs: Any\n ) -> __BaseAWSObjectTypeVar:\n props: Dict[str, Any] = {}\n for prop_name, value in kwargs.items():\n try:\n prop_attrs = cls.props[prop_name]\n except KeyError:\n raise AttributeError(\n \"Object type %s does not have a \"\n \"%s property.\" % (cls.__name__, prop_name)\n )\n prop_type = prop_attrs[0]\n value = kwargs[prop_name]\n is_aws_object = is_aws_object_subclass(prop_type)\n if is_aws_object:\n if not isinstance(value, collections.abc.Mapping):\n raise ValueError(\n \"Property definition for %s must be \"\n \"a Mapping type\" % prop_name\n )\n value = cast(BaseAWSObject, prop_type)._from_dict(**value)\n\n if isinstance(prop_type, list):\n if not isinstance(value, list):\n raise TypeError(\"Attribute %s must be a \" \"list.\" % prop_name)\n new_value: List[Any] = []\n for v in cast(List[Any], value):\n new_v = v\n if is_aws_object_subclass(prop_type[0]):\n if not isinstance(v, collections.abc.Mapping):\n raise ValueError(\n \"Property definition for %s must be \"\n \"a list of Mapping types\" % prop_name\n )\n new_v = cast(BaseAWSObject, prop_type[0])._from_dict(**v)\n new_value.append(new_v)\n value = new_value\n props[prop_name] = value\n if title:\n return cls(title, **props)\n return cls(**props)\n\n @classmethod\n def from_dict(\n cls: Type[__BaseAWSObjectTypeVar], title: str, d: Dict[str, Any]\n ) -> __BaseAWSObjectTypeVar:\n return cls._from_dict(title, **d)\n\n def _validate_props(self) -> None:\n for k, (_, required) in self.props.items():\n if required and k not in self.properties:\n rtype = getattr(self, \"resource_type\", type(self))\n title = getattr(self, \"title\")\n msg = \"Resource %s required in type %s\" % (k, rtype)\n if title:\n msg += \" (title: %s)\" % title\n raise ValueError(msg)\n\n\nclass AWSObject(BaseAWSObject):\n dictname = \"Properties\"\n\n def ref(self) -> Ref:\n return Ref(self)\n\n Ref = ref\n\n def get_att(self, value: str) -> GetAtt:\n return GetAtt(self, value)\n\n GetAtt = get_att\n\n\nclass AWSDeclaration(BaseAWSObject):\n \"\"\"\n Used for CloudFormation Resource Property objects\n http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/\n aws-product-property-reference.html\n \"\"\"\n\n def __init__(self, title: str, **kwargs: Any) -> None:\n super().__init__(title, **kwargs)\n\n def ref(self) -> Ref:\n return Ref(self)\n\n Ref = ref\n\n\nclass AWSProperty(BaseAWSObject):\n 
\"\"\"\n Used for CloudFormation Resource Property objects\n http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/\n aws-product-property-reference.html\n \"\"\"\n\n dictname = None\n\n def __init__(self, title: Optional[str] = None, **kwargs: Any) -> None:\n super().__init__(title, **kwargs)\n\n\nclass AWSAttribute(BaseAWSObject):\n \"\"\"\n Used for CloudFormation Resource Attribute objects\n http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/\n aws-product-attribute-reference.html\n \"\"\"\n\n dictname = None\n\n def __init__(self, title: Optional[str] = None, **kwargs: Any) -> None:\n super().__init__(title, **kwargs)\n\n\ndef validate_delimiter(delimiter: object) -> None:\n if not isinstance(delimiter, str):\n raise ValueError(\"Delimiter must be a String, %s provided\" % type(delimiter))\n\n\ndef validate_pausetime(pausetime: str) -> str:\n if not pausetime.startswith(\"PT\"):\n raise ValueError(\"PauseTime should look like PT#H#M#S\")\n return pausetime\n\n\nclass AWSHelperFn:\n data: Any\n\n def getdata(self, data: object) -> Any:\n if isinstance(data, BaseAWSObject):\n return data.title\n else:\n return data\n\n def to_dict(self) -> Any:\n return encode_to_dict(self.data)\n\n\nclass GenericHelperFn(AWSHelperFn):\n \"\"\"Used as a fallback for the template generator\"\"\"\n\n def __init__(self, data: Any):\n self.data = self.getdata(data)\n\n def to_dict(self) -> Any:\n return encode_to_dict(self.data)\n\n\nclass Base64(AWSHelperFn):\n def __init__(self, data: Any) -> None:\n self.data = {\"Fn::Base64\": data}\n\n\nclass FindInMap(AWSHelperFn):\n def __init__(\n self, mapname: object, toplevelkey: object, secondlevelkey: object\n ) -> None:\n self.data = {\n \"Fn::FindInMap\": [self.getdata(mapname), toplevelkey, secondlevelkey]\n }\n\n\nclass GetAtt(AWSHelperFn):\n def __init__(self, logicalName: object, attrName: object) -> None: # noqa: N803\n self.data = {\"Fn::GetAtt\": [self.getdata(logicalName), attrName]}\n\n\nclass Cidr(AWSHelperFn):\n def __init__(\n self, ipblock: object, count: object, sizemask: Optional[object] = None\n ) -> None:\n if sizemask:\n self.data = {\"Fn::Cidr\": [ipblock, count, sizemask]}\n else:\n self.data = {\"Fn::Cidr\": [ipblock, count]}\n\n\nclass GetAZs(AWSHelperFn):\n def __init__(self, region: object = \"\") -> None:\n self.data = {\"Fn::GetAZs\": region}\n\n\nclass If(AWSHelperFn):\n def __init__(self, cond: object, true: object, false: object) -> None:\n self.data = {\"Fn::If\": [self.getdata(cond), true, false]}\n\n\nclass Equals(AWSHelperFn):\n def __init__(self, value_one: object, value_two: object) -> None:\n self.data = {\"Fn::Equals\": [value_one, value_two]}\n\n\nclass And(AWSHelperFn):\n def __init__(self, cond_one: object, cond_two: object, *conds: object) -> None:\n self.data = {\"Fn::And\": [cond_one, cond_two] + list(conds)}\n\n\nclass Or(AWSHelperFn):\n def __init__(self, cond_one: object, cond_two: object, *conds: object) -> None:\n self.data = {\"Fn::Or\": [cond_one, cond_two] + list(conds)}\n\n\nclass Not(AWSHelperFn):\n def __init__(self, cond: object) -> None:\n self.data = {\"Fn::Not\": [self.getdata(cond)]}\n\n\nclass Join(AWSHelperFn):\n def __init__(self, delimiter: object, values: object) -> None:\n validate_delimiter(delimiter)\n self.data = {\"Fn::Join\": [delimiter, values]}\n\n\nclass Split(AWSHelperFn):\n def __init__(self, delimiter: object, values: object) -> None:\n validate_delimiter(delimiter)\n self.data = {\"Fn::Split\": [delimiter, values]}\n\n\nclass Sub(AWSHelperFn):\n def __init__(\n 
self,\n input_str: object,\n dict_values: Optional[Dict[str, Any]] = None,\n **values: Any,\n ) -> None:\n # merge dict\n if dict_values:\n values.update(dict_values)\n self.data = {\"Fn::Sub\": [input_str, values] if values else input_str}\n\n\nclass Name(AWSHelperFn):\n def __init__(self, data: object) -> None:\n self.data = self.getdata(data)\n\n\nclass Select(AWSHelperFn):\n def __init__(self, indx: object, objects: object) -> None:\n self.data = {\"Fn::Select\": [indx, objects]}\n\n\nclass Ref(AWSHelperFn):\n def __init__(self, data: object) -> None:\n self.data = {\"Ref\": self.getdata(data)}\n\n def __eq__(self, other: Any) -> bool:\n if isinstance(other, self.__class__):\n return self.data == other.data\n return list(self.data.values())[0] == other\n\n def __hash__(self) -> int:\n return hash(list(self.data.values())[0])\n\n\n# The type of the props dict\nPropsDictType = Dict[\n str,\n Tuple[\n Union[\n str,\n AWSProperty,\n AWSHelperFn,\n Callable[[Any], Any],\n Dict[str, Any],\n List[Any],\n Tuple[type, ...],\n ],\n bool,\n ],\n]\n\n# Pseudo Parameter Ref's\nAccountId = Ref(AWS_ACCOUNT_ID)\nNotificationARNs = Ref(AWS_NOTIFICATION_ARNS)\nNoValue = Ref(AWS_NO_VALUE)\nPartition = Ref(AWS_PARTITION)\nRegion = Ref(AWS_REGION)\nStackId = Ref(AWS_STACK_ID)\nStackName = Ref(AWS_STACK_NAME)\nURLSuffix = Ref(AWS_URL_SUFFIX)\n\n\nclass Condition(AWSHelperFn):\n def __init__(self, data: object) -> None:\n self.data = {\"Condition\": self.getdata(data)}\n\n\nclass ImportValue(AWSHelperFn):\n def __init__(self, data: object) -> None:\n self.data = {\"Fn::ImportValue\": data}\n\n\nclass Tag(AWSHelperFn):\n def __init__(self, k: object, v: object) -> None:\n self.data = {\n \"Key\": k,\n \"Value\": v,\n }\n\n\nclass Tags(AWSHelperFn):\n tags: List[Union[AWSHelperFn, Dict[Any, Any]]]\n\n def __init__(self, *args: object, **kwargs: Any):\n self.tags = []\n tag_dict: Dict[Any, Any]\n if not args:\n # Assume kwargs variant\n tag_dict = kwargs\n else:\n tag_dict = {}\n for arg in args:\n # Validate argument passed in is an AWSHelperFn or...\n if isinstance(arg, AWSHelperFn):\n self.tags.append(arg)\n # Validate argument passed in is a dict\n elif isinstance(arg, dict):\n tag_dict.update(cast(Dict[str, Any], arg))\n else:\n raise TypeError(\n \"Tags needs to be either kwargs, dict, or AWSHelperFn\"\n )\n\n def add_tag(\n tag_list: List[Union[AWSHelperFn, Dict[Any, Any]]], k: object, v: object\n ):\n tag_list.append(\n {\n \"Key\": k,\n \"Value\": v,\n }\n )\n\n # Detect and handle non-string Tag items which do not sort in Python3\n if all(isinstance(k, str) for k in tag_dict):\n for k, v in sorted(tag_dict.items()):\n add_tag(self.tags, k, v)\n else:\n for k, v in tag_dict.items():\n add_tag(self.tags, k, v)\n\n # allow concatenation of the Tags object via '+' operator\n def __add__(self, newtags: Tags) -> Tags:\n newtags.tags = self.tags + newtags.tags\n return newtags\n\n def to_dict(self) -> List[Any]:\n return [encode_to_dict(tag) for tag in self.tags]\n\n @classmethod\n def from_dict(cls, title: Optional[str] = None, **kwargs: Any):\n return cls(**kwargs)\n\n\n__OutputTypeVar = TypeVar(\"__OutputTypeVar\", \"Output\", List[\"Output\"])\n__ParameterTypeVar = TypeVar(\"__ParameterTypeVar\", \"Parameter\", List[\"Parameter\"])\n__ResourceTypeVar = TypeVar(\n \"__ResourceTypeVar\", bound=Union[BaseAWSObject, List[BaseAWSObject]]\n)\n__UpdateTypeVar = TypeVar(\n \"__UpdateTypeVar\",\n bound=Union[BaseAWSObject, List[BaseAWSObject], List[\"Output\"], List[\"Parameter\"]],\n)\n\n\nclass Template:\n from 
troposphere.serverless import Globals\n\n conditions: Dict[str, Union[AWSHelperFn, Condition]]\n description: Optional[str]\n globals: Optional[Globals]\n mappings: Dict[str, Dict[str, Any]]\n metadata: Dict[str, Any]\n outputs: Dict[str, Output]\n parameters: Dict[str, Parameter]\n props: Dict[str, Tuple[type, bool]] = {\n \"AWSTemplateFormatVersion\": (str, False),\n \"Transform\": (str, False),\n \"Description\": (str, False),\n \"Parameters\": (dict, False),\n \"Mappings\": (dict, False),\n \"Resources\": (dict, False),\n \"Globals\": (Globals, False),\n \"Outputs\": (dict, False),\n \"Rules\": (dict, False),\n }\n resources: Dict[str, AWSObject]\n rules: Dict[str, Any]\n transform: Optional[Union[List[object], str]]\n version: Optional[str]\n\n def __init__(\n self,\n Description: Optional[str] = None,\n Metadata: Optional[Dict[str, Any]] = None,\n ): # noqa: N803\n self.description = Description\n self.metadata = {} if Metadata is None else Metadata\n self.conditions = {}\n self.mappings = {}\n self.outputs = {}\n self.parameters = {}\n self.resources = {}\n self.rules = {}\n self.globals = None\n self.version = None\n self.transform = None\n\n def set_description(self, description: str) -> None:\n self.description = description\n\n def set_metadata(self, metadata: Dict[str, Any]) -> None:\n self.metadata = metadata\n\n def add_condition(self, name: str, condition: AWSHelperFn) -> str:\n self.conditions[name] = condition\n return name\n\n def handle_duplicate_key(self, key: Optional[str]) -> NoReturn:\n raise ValueError('duplicate key \"%s\" detected' % key)\n\n def _update(self, d: Dict[Any, Any], values: __UpdateTypeVar) -> __UpdateTypeVar:\n if isinstance(values, list):\n for v in values:\n if v.title in d:\n self.handle_duplicate_key(v.title)\n d[v.title] = v\n else:\n if values.title in d:\n self.handle_duplicate_key(values.title)\n d[values.title] = values\n return values\n\n def add_output(self, output: __OutputTypeVar) -> __OutputTypeVar:\n if len(self.outputs) >= MAX_OUTPUTS:\n raise ValueError(\"Maximum outputs %d reached\" % MAX_OUTPUTS)\n return self._update(self.outputs, output)\n\n def add_mapping(self, name: str, mapping: Dict[str, Any]) -> None:\n if len(self.mappings) >= MAX_MAPPINGS:\n raise ValueError(\"Maximum mappings %d reached\" % MAX_MAPPINGS)\n if name not in self.mappings:\n self.mappings[name] = {}\n self.mappings[name].update(mapping)\n\n def add_parameter(self, parameter: __ParameterTypeVar) -> __ParameterTypeVar:\n if len(self.parameters) >= MAX_PARAMETERS:\n raise ValueError(\"Maximum parameters %d reached\" % MAX_PARAMETERS)\n return self._update(self.parameters, parameter)\n\n def get_or_add_parameter(self, parameter: Parameter) -> Parameter:\n if parameter.title in self.parameters:\n return self.parameters[parameter.title]\n else:\n self.add_parameter(parameter)\n return parameter\n\n def add_resource(self, resource: __ResourceTypeVar) -> __ResourceTypeVar:\n if len(self.resources) >= MAX_RESOURCES:\n raise ValueError(\"Maximum number of resources %d reached\" % MAX_RESOURCES)\n return self._update(self.resources, resource)\n\n def add_rule(self, name: str, rule: object) -> None:\n \"\"\"\n Add a Rule to the template to enforce extra constraints on the\n parameters. 
As of June 2019 rules are undocumented in CloudFormation\n but have the same syntax and behaviour as in ServiceCatalog:\n https://docs.aws.amazon.com/servicecatalog/latest/adminguide/reference-template_constraint_rules.html\n\n :param rule: a dict with 'Assertions' (mandatory) and 'RuleCondition'\n (optional) keys\n \"\"\"\n # TODO: check maximum number of Rules, and enforce limit.\n if name in self.rules:\n self.handle_duplicate_key(name)\n self.rules[name] = rule\n\n def set_version(self, version: Optional[str] = None) -> None:\n if version:\n self.version = version\n else:\n self.version = \"2010-09-09\"\n\n def set_transform(self, transform: Union[List[object], str]) -> None:\n from troposphere.serverless import SERVERLESS_TRANSFORM\n\n if self.globals and transform != SERVERLESS_TRANSFORM:\n raise ValueError(\n \"Cannot set transform to non-Serverless while using Globals\"\n )\n self.transform = transform\n\n def set_globals(self, globals: Globals) -> None:\n from troposphere.serverless import SERVERLESS_TRANSFORM\n\n if self.transform != SERVERLESS_TRANSFORM:\n raise ValueError(\n f\"Cannot set Globals for non-Serverless template (set transform to '{SERVERLESS_TRANSFORM}' first)\"\n )\n self.globals = globals\n\n def to_dict(self) -> Dict[str, Any]:\n t = {}\n if self.description:\n t[\"Description\"] = self.description\n if self.metadata:\n t[\"Metadata\"] = self.metadata\n if self.conditions:\n t[\"Conditions\"] = self.conditions\n if self.mappings:\n t[\"Mappings\"] = self.mappings\n if self.outputs:\n t[\"Outputs\"] = self.outputs\n if self.parameters:\n t[\"Parameters\"] = self.parameters\n if self.version:\n t[\"AWSTemplateFormatVersion\"] = self.version\n if self.transform:\n t[\"Transform\"] = self.transform\n if self.rules:\n t[\"Rules\"] = self.rules\n if self.globals:\n t[\"Globals\"] = self.globals\n t[\"Resources\"] = self.resources\n\n return encode_to_dict(t)\n\n def set_parameter_label(self, parameter: Union[Parameter, str], label: str) -> None:\n \"\"\"\n Sets the Label used in the User Interface for the given parameter.\n :type parameter: str or Parameter\n :type label: str\n \"\"\"\n labels = self.metadata.setdefault(\n \"AWS::CloudFormation::Interface\", {}\n ).setdefault(\"ParameterLabels\", {})\n\n if isinstance(parameter, BaseAWSObject):\n parameter = parameter.title\n\n labels[parameter] = {\"default\": label}\n\n def add_parameter_to_group(\n self, parameter: Union[Parameter, str], group_name: str\n ) -> str:\n \"\"\"\n Add a parameter under a group (created if needed).\n :type parameter: str or Parameter\n :type group_name: str\n \"\"\"\n groups = self.metadata.setdefault(\n \"AWS::CloudFormation::Interface\", {}\n ).setdefault(\"ParameterGroups\", [])\n\n if isinstance(parameter, BaseAWSObject):\n parameter = parameter.title\n\n # Check if group_name already exists\n existing_group: Optional[Dict[str, Any]] = None\n for group in groups:\n if group[\"Label\"][\"default\"] == group_name:\n existing_group = group\n break\n\n if existing_group is None:\n existing_group = {\n \"Label\": {\"default\": group_name},\n \"Parameters\": [],\n }\n groups.append(existing_group)\n\n existing_group[\"Parameters\"].append(parameter)\n\n return group_name\n\n def to_json(\n self,\n indent: int = 1,\n sort_keys: bool = True,\n separators: Tuple[str, str] = (\",\", \": \"),\n ) -> str:\n return json.dumps(\n self.to_dict(), indent=indent, sort_keys=sort_keys, separators=separators\n )\n\n def to_yaml(\n self, clean_up: bool = False, long_form: bool = False, sort_keys: bool = 
True\n ) -> str:\n return cfn_flip.to_yaml( # type: ignore\n self.to_json(sort_keys=sort_keys), clean_up=clean_up, long_form=long_form\n )\n\n def __eq__(self, other: object) -> bool:\n if isinstance(other, Template):\n return self.to_json() == other.to_json()\n else:\n return False\n\n def __ne__(self, other: object) -> bool:\n return not self.__eq__(other)\n\n def __hash__(self) -> int:\n return hash(self.to_json())\n\n\nclass Export(AWSHelperFn):\n def __init__(self, name: Union[str, AWSHelperFn]) -> None:\n self.data = {\n \"Name\": name,\n }\n\n\nclass Output(AWSDeclaration):\n props = {\n \"Description\": (str, False),\n \"Export\": (Export, False),\n \"Value\": (str, True),\n }\n\n def add_to_template(self) -> None:\n # Bound it to template if we know it\n if self.template is not None:\n self.template.add_output(self)\n\n\nclass Parameter(AWSDeclaration):\n STRING_PROPERTIES = [\"AllowedPattern\", \"MaxLength\", \"MinLength\"]\n NUMBER_PROPERTIES = [\"MaxValue\", \"MinValue\"]\n props = {\n \"Type\": (str, True),\n \"Default\": ((str, int, float), False),\n \"NoEcho\": (bool, False),\n \"AllowedValues\": (list, False),\n \"AllowedPattern\": (str, False),\n \"MaxLength\": (validators.positive_integer, False),\n \"MinLength\": (validators.positive_integer, False),\n \"MaxValue\": (validators.integer, False),\n \"MinValue\": (validators.integer, False),\n \"Description\": (str, False),\n \"ConstraintDescription\": (str, False),\n }\n title: str\n\n def add_to_template(self) -> None:\n # Bound it to template if we know it\n if self.template is not None:\n self.template.add_parameter(self)\n\n def validate_title(self) -> None:\n if len(self.title) > PARAMETER_TITLE_MAX:\n raise ValueError(\n \"Parameter title can be no longer than \"\n \"%d characters\" % PARAMETER_TITLE_MAX\n )\n super().validate_title()\n\n def validate(self) -> None:\n def check_type(t: type, v: Any) -> bool:\n try:\n t(v)\n return True\n except ValueError:\n return False\n\n # Validate the Default parameter value\n default = self.properties.get(\"Default\")\n if default:\n error_str = (\n \"Parameter default type mismatch: expecting \"\n \"type %s got %s with value %r\"\n )\n # Get the Type specified and see whether the default type\n # matches (in the case of a String Type) or can be coerced\n # into one of the number formats.\n param_type = self.properties.get(\"Type\")\n if param_type == \"String\" and not isinstance(default, str):\n raise ValueError(error_str % (\"String\", type(default), default))\n elif param_type == \"Number\":\n allowed = [float, int]\n # See if the default value can be coerced into one\n # of the correct types\n if not any(check_type(x, default) for x in allowed):\n raise ValueError(error_str % (param_type, type(default), default))\n elif param_type == \"List<Number>\":\n if not isinstance(default, str):\n raise ValueError(error_str % (param_type, type(default), default))\n allowed = [float, int]\n dlist = default.split(\",\")\n for d in dlist:\n # Verify the split array are all numbers\n if not any(check_type(x, d) for x in allowed):\n raise ValueError(error_str % (param_type, type(d), dlist))\n\n if self.properties[\"Type\"] != \"String\":\n for p in self.STRING_PROPERTIES:\n if p in self.properties:\n raise ValueError(\n \"%s can only be used with parameters of \" \"the String type.\" % p\n )\n if self.properties[\"Type\"] != \"Number\":\n for p in self.NUMBER_PROPERTIES:\n if p in self.properties:\n raise ValueError(\n \"%s can only be used with parameters of \" \"the Number type.\" % 
p\n )\n",
"path": "troposphere/__init__.py"
}
] | diff --git a/troposphere/__init__.py b/troposphere/__init__.py
index 9ed9147bf..9bcdfa2fd 100644
--- a/troposphere/__init__.py
+++ b/troposphere/__init__.py
@@ -46,6 +46,7 @@
# constants for DeletionPolicy and UpdateReplacePolicy
Delete: Final = "Delete"
Retain: Final = "Retain"
+RetainExceptOnCreate: Final = "RetainExceptOnCreate"
Snapshot: Final = "Snapshot"

# Pseudo Parameters
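
For context, a minimal sketch of how the new constant is used as a resource attribute (assuming troposphere's standard `Template` and `s3.Bucket` API, which are not part of this diff):

```python
from troposphere import RetainExceptOnCreate, Template
from troposphere.s3 import Bucket

t = Template()
bucket = t.add_resource(Bucket("DataBucket"))
# DeletionPolicy is one of the resource attributes accepted by
# BaseAWSObject.__setattr__, so the new constant is assigned directly.
bucket.DeletionPolicy = RetainExceptOnCreate
print(t.to_yaml())
```

Per the CloudFormation docs, `RetainExceptOnCreate` behaves like `Retain` except during stack operations that create the resource, where a failed creation is cleaned up as with `Delete`.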
|
GoogleCloudPlatform__PerfKitBenchmarker-2045 | PodIP address is not populated while running iperf on Kubernetes
Referencing #1990. Sorry for the late reply. Even after using ```--ip_addresses=INTERNAL```, the IP is still not populated.
I tried all the options for ```ip_addresses``` (BOTH, INTERNAL, EXTERNAL, REACHABLE); all fail the same way.
Command used: ```./pkb.py --cloud=Kubernetes --benchmarks=iperf --kubeconfig=/root/.kube/config --image=ptest:v0.2 --ip_addresses=INTERNAL```
Notice the ```root@None``` in the logs below:
```
Running: ssh -A -p 22 root@None -2 -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o IdentitiesOnly=yes -o PreferredAuthentications=publickey -o PasswordAuthentication=no -o ConnectTimeout=5 -o GSSAPIAuthentication=no -o ServerAliveInterval=30 -o ServerAliveCountMax=10 -i /tmp/perfkitbenchmarker/runs/f86259b8/perfkitbenchmarker_keyfile mkdir -p /tmp/pkb
```
```
}
],
"hostIP": "91.106.194.55",
"phase": "Running",
"podIP": "10.233.84.192",
"qosClass": "BestEffort",
"startTime": "2019-11-20T05:57:07Z"
}
}
```
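
The pod JSON clearly carries ```podIP```, so the bug is on the PKB side: ```_GetInternalIp``` stored the value only in ```internal_ip``` and never in ```ip_address```, which appears to be what the SSH wrapper interpolates as the connect host (hence ```root@None```). A minimal sketch of the corrected method, mirroring the one-line change visible in the after file below:

```python
def _GetInternalIp(self):
    """Gets the POD's internal ip address."""
    pod_ip = kubernetes_helper.Get('pods', self.name, '', '.status.podIP')

    if not pod_ip:
        raise Exception('Internal POD IP address not found. Retrying.')

    self.internal_ip = pod_ip
    # New: also populate ip_address so SSH targets the pod IP instead of None.
    self.ip_address = pod_ip
```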
| [
{
"content": "# Copyright 2017 PerfKitBenchmarker Authors. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Contains code related to lifecycle management of Kubernetes Pods.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport json\nimport logging\nimport posixpath\n\nfrom perfkitbenchmarker import context\nfrom perfkitbenchmarker import disk\nfrom perfkitbenchmarker import errors\nfrom perfkitbenchmarker import flags\nfrom perfkitbenchmarker import kubernetes_helper\nfrom perfkitbenchmarker import providers\nfrom perfkitbenchmarker import virtual_machine, linux_virtual_machine\nfrom perfkitbenchmarker import vm_util\nfrom perfkitbenchmarker.providers.aws import aws_virtual_machine\nfrom perfkitbenchmarker.providers.azure import azure_virtual_machine\nfrom perfkitbenchmarker.providers.gcp import gce_virtual_machine\nfrom perfkitbenchmarker.providers.kubernetes import kubernetes_disk\nfrom perfkitbenchmarker.vm_util import OUTPUT_STDOUT as STDOUT\nimport six\n\nFLAGS = flags.FLAGS\n\nUBUNTU_IMAGE = 'ubuntu-upstart'\nSELECTOR_PREFIX = 'pkb'\n\n\nclass KubernetesVirtualMachine(virtual_machine.BaseVirtualMachine):\n \"\"\"Object representing a Kubernetes POD.\"\"\"\n CLOUD = providers.KUBERNETES\n DEFAULT_IMAGE = None\n CONTAINER_COMMAND = None\n HOME_DIR = '/root'\n IS_REBOOTABLE = False\n\n def __init__(self, vm_spec):\n \"\"\"Initialize a Kubernetes virtual machine.\n\n Args:\n vm_spec: KubernetesPodSpec object of the vm.\n \"\"\"\n super(KubernetesVirtualMachine, self).__init__(vm_spec)\n self.num_scratch_disks = 0\n self.name = self.name.replace('_', '-')\n self.user_name = FLAGS.username\n self.image = self.image or self.DEFAULT_IMAGE\n self.resource_limits = vm_spec.resource_limits\n self.resource_requests = vm_spec.resource_requests\n\n def GetResourceMetadata(self):\n metadata = super(KubernetesVirtualMachine, self).GetResourceMetadata()\n if self.resource_limits:\n metadata.update({\n 'pod_cpu_limit': self.resource_limits.cpus,\n 'pod_memory_limit_mb': self.resource_limits.memory,\n })\n if self.resource_requests:\n metadata.update({\n 'pod_cpu_request': self.resource_requests.cpus,\n 'pod_memory_request_mb': self.resource_requests.memory,\n })\n return metadata\n\n def _CreateDependencies(self):\n self._CheckPrerequisites()\n self._CreateVolumes()\n\n def _DeleteDependencies(self):\n self._DeleteVolumes()\n\n def _Create(self):\n self._CreatePod()\n self._WaitForPodBootCompletion()\n\n @vm_util.Retry()\n def _PostCreate(self):\n self._GetInternalIp()\n self._ConfigureProxy()\n self._SetupDevicesPaths()\n\n def _Delete(self):\n self._DeletePod()\n\n def _CheckPrerequisites(self):\n \"\"\"Exits if any of the prerequisites is not met.\"\"\"\n if not FLAGS.kubectl:\n raise Exception('Please provide path to kubectl tool using --kubectl '\n 'flag. Exiting.')\n if not FLAGS.kubeconfig:\n raise Exception('Please provide path to kubeconfig using --kubeconfig '\n 'flag. 
Exiting.')\n if self.disk_specs and self.disk_specs[0].disk_type == disk.STANDARD:\n if not FLAGS.ceph_monitors:\n raise Exception('Please provide a list of Ceph Monitors using '\n '--ceph_monitors flag.')\n\n def _CreatePod(self):\n \"\"\"Creates a POD (Docker container with optional volumes).\"\"\"\n create_rc_body = self._BuildPodBody()\n logging.info('About to create a pod with the following configuration:')\n logging.info(create_rc_body)\n kubernetes_helper.CreateResource(create_rc_body)\n\n @vm_util.Retry(poll_interval=10, max_retries=100, log_errors=False)\n def _WaitForPodBootCompletion(self):\n \"\"\"\n Need to wait for the PODs to get up - PODs are created with a little delay.\n \"\"\"\n exists_cmd = [FLAGS.kubectl, '--kubeconfig=%s' % FLAGS.kubeconfig, 'get',\n 'pod', '-o=json', self.name]\n logging.info('Waiting for POD %s', self.name)\n pod_info, _, _ = vm_util.IssueCommand(exists_cmd, suppress_warning=True,\n raise_on_failure=False)\n if pod_info:\n pod_info = json.loads(pod_info)\n containers = pod_info['spec']['containers']\n if len(containers) == 1:\n pod_status = pod_info['status']['phase']\n if (containers[0]['name'].startswith(self.name)\n and pod_status == 'Running'):\n logging.info('POD is up and running.')\n return\n raise Exception('POD %s is not running. Retrying to check status.' %\n self.name)\n\n def _DeletePod(self):\n \"\"\"Deletes a POD.\"\"\"\n delete_pod = [FLAGS.kubectl, '--kubeconfig=%s' % FLAGS.kubeconfig,\n 'delete', 'pod', self.name]\n output = vm_util.IssueCommand(delete_pod, raise_on_failure=False)\n logging.info(output[STDOUT].rstrip())\n\n @vm_util.Retry(poll_interval=10, max_retries=20)\n def _Exists(self):\n \"\"\"POD should have been already created but this is a double check.\"\"\"\n exists_cmd = [FLAGS.kubectl, '--kubeconfig=%s' % FLAGS.kubeconfig, 'get',\n 'pod', '-o=json', self.name]\n pod_info, _, _ = vm_util.IssueCommand(\n exists_cmd, suppress_warning=True, raise_on_failure=False)\n if pod_info:\n return True\n return False\n\n def _CreateVolumes(self):\n \"\"\"\n Creates volumes for scratch disks. These volumes have to be created\n BEFORE containers creation because Kubernetes doesn't allow to attach\n volume to currently running containers.\n \"\"\"\n self.scratch_disks = kubernetes_disk.CreateDisks(self.disk_specs, self.name)\n\n @vm_util.Retry(poll_interval=10, max_retries=20, log_errors=False)\n def _DeleteVolumes(self):\n \"\"\"Deletes volumes.\"\"\"\n for scratch_disk in self.scratch_disks[:]:\n scratch_disk.Delete()\n self.scratch_disks.remove(scratch_disk)\n\n def DeleteScratchDisks(self):\n pass\n\n def _GetInternalIp(self):\n \"\"\"Gets the POD's internal ip address.\"\"\"\n pod_ip = kubernetes_helper.Get(\n 'pods', self.name, '', '.status.podIP')\n\n if not pod_ip:\n raise Exception('Internal POD IP address not found. Retrying.')\n\n self.internal_ip = pod_ip\n\n def _ConfigureProxy(self):\n \"\"\"\n In Docker containers environment variables from /etc/environment\n are not sourced - this results in connection problems when running\n behind proxy. Prepending proxy environment variables to bashrc\n solves the problem. 
Note: APPENDING to bashrc will not work because\n the script exits when it is NOT executed in interactive shell.\n \"\"\"\n\n if FLAGS.http_proxy:\n http_proxy = 'sed -i \\'1i export http_proxy=%s\\' /etc/bash.bashrc'\n self.RemoteCommand(http_proxy % FLAGS.http_proxy)\n if FLAGS.https_proxy:\n https_proxy = 'sed -i \\'1i export https_proxy=%s\\' /etc/bash.bashrc'\n self.RemoteCommand(https_proxy % FLAGS.http_proxy)\n if FLAGS.ftp_proxy:\n ftp_proxy = 'sed -i \\'1i export ftp_proxy=%s\\' /etc/bash.bashrc'\n self.RemoteCommand(ftp_proxy % FLAGS.ftp_proxy)\n\n def _SetupDevicesPaths(self):\n \"\"\"Sets the path to each scratch disk device.\"\"\"\n for scratch_disk in self.scratch_disks:\n scratch_disk.SetDevicePath(self)\n\n def _BuildPodBody(self):\n \"\"\"\n Builds a JSON which will be passed as a body of POST request\n to Kuberneres API in order to create a POD.\n \"\"\"\n\n container = self._BuildContainerBody()\n volumes = self._BuildVolumesBody()\n\n template = {\n 'kind': 'Pod',\n 'apiVersion': 'v1',\n 'metadata': {\n 'name': self.name,\n 'labels': {\n SELECTOR_PREFIX: self.name\n }\n },\n 'spec': {\n 'volumes': volumes,\n 'containers': [container],\n 'dnsPolicy': 'ClusterFirst',\n }\n }\n if FLAGS.kubernetes_anti_affinity:\n template['spec']['affinity'] = {\n 'podAntiAffinity': {\n 'requiredDuringSchedulingIgnoredDuringExecution': [{\n 'labelSelector': {\n 'matchExpressions': [{\n 'key': 'pkb_anti_affinity',\n 'operator': 'In',\n 'values': [''],\n }],\n },\n 'topologyKey': 'kubernetes.io/hostname',\n }],\n },\n }\n template['metadata']['labels']['pkb_anti_affinity'] = ''\n\n return json.dumps(template)\n\n def _BuildVolumesBody(self):\n \"\"\"Constructs volumes-related part of POST request to create POD.\"\"\"\n volumes = []\n\n for scratch_disk in self.scratch_disks:\n scratch_disk.AttachVolumeInfo(volumes)\n\n return volumes\n\n def _BuildContainerBody(self):\n \"\"\"Constructs containers-related part of POST request to create POD.\"\"\"\n registry = getattr(context.GetThreadBenchmarkSpec(), 'registry', None)\n if (not FLAGS.static_container_image and\n registry is not None):\n image = registry.GetFullRegistryTag(self.image)\n else:\n image = self.image\n container = {\n 'image': image,\n 'name': self.name,\n 'workingDir': self.HOME_DIR,\n 'securityContext': {\n 'privileged': FLAGS.docker_in_privileged_mode\n },\n 'volumeMounts': [\n ]\n }\n\n for scratch_disk in self.scratch_disks:\n scratch_disk.AttachVolumeMountInfo(container['volumeMounts'])\n\n resource_body = self._BuildResourceBody()\n if resource_body:\n container['resources'] = resource_body\n\n if self.CONTAINER_COMMAND:\n container['command'] = self.CONTAINER_COMMAND\n\n return container\n\n def _BuildResourceBody(self):\n \"\"\"Constructs a dictionary that specifies resource limits and requests.\n\n The syntax for including GPUs is specific to GKE and is likely to\n change in the future.\n See https://kubernetes.io/docs/tasks/manage-gpus/scheduling-gpus\n\n Returns:\n kubernetes pod resource body containing pod limits and requests.\n \"\"\"\n resources = {\n 'limits': {},\n 'requests': {},\n }\n\n if self.resource_requests:\n resources['requests'].update({\n 'cpu': str(self.resource_requests.cpus),\n 'memory': '{0}Mi'.format(self.resource_requests.memory),\n })\n\n if self.resource_limits:\n resources['limits'].update({\n 'cpu': str(self.resource_limits.cpus),\n 'memory': '{0}Mi'.format(self.resource_limits.memory),\n })\n\n if self.gpu_count:\n gpu_dict = {\n 'nvidia.com/gpu': str(self.gpu_count)\n }\n 
resources['limits'].update(gpu_dict)\n resources['requests'].update(gpu_dict)\n\n result_with_empty_values_removed = ({\n k: v for k, v in six.iteritems(resources) if v\n })\n return result_with_empty_values_removed\n\n\nclass DebianBasedKubernetesVirtualMachine(KubernetesVirtualMachine,\n linux_virtual_machine.DebianMixin):\n DEFAULT_IMAGE = UBUNTU_IMAGE\n\n def RemoteHostCommandWithReturnCode(self, command,\n should_log=False, retries=None,\n ignore_failure=False, login_shell=False,\n suppress_warning=False, timeout=None):\n \"\"\"Runs a command in the Kubernetes container.\"\"\"\n cmd = [FLAGS.kubectl, '--kubeconfig=%s' % FLAGS.kubeconfig, 'exec', '-i',\n self.name, '--', '/bin/bash', '-c', command]\n stdout, stderr, retcode = vm_util.IssueCommand(\n cmd, force_info_log=should_log,\n suppress_warning=suppress_warning, timeout=timeout,\n raise_on_failure=False)\n if not ignore_failure and retcode:\n error_text = ('Got non-zero return code (%s) executing %s\\n'\n 'Full command: %s\\nSTDOUT: %sSTDERR: %s' %\n (retcode, command, ' '.join(cmd),\n stdout, stderr))\n raise errors.VirtualMachine.RemoteCommandError(error_text)\n return stdout, stderr, retcode\n\n def MoveHostFile(self, target, source_path, remote_path=''):\n \"\"\"Copies a file from one VM to a target VM.\n\n Args:\n target: The target BaseVirtualMachine object.\n source_path: The location of the file on the REMOTE machine.\n remote_path: The destination of the file on the TARGET machine, default\n is the home directory.\n \"\"\"\n file_name = vm_util.PrependTempDir(posixpath.basename(source_path))\n self.RemoteHostCopy(file_name, source_path, copy_to=False)\n target.RemoteHostCopy(file_name, remote_path)\n\n def RemoteHostCopy(self, file_path, remote_path='', copy_to=True):\n \"\"\"Copies a file to or from the VM.\n\n Args:\n file_path: Local path to file.\n remote_path: Optional path of where to copy file on remote host.\n copy_to: True to copy to vm, False to copy from vm.\n\n Raises:\n RemoteCommandError: If there was a problem copying the file.\n \"\"\"\n if copy_to:\n file_name = posixpath.basename(file_path)\n src_spec, dest_spec = file_path, '%s:%s' % (self.name, file_name)\n else:\n remote_path, _ = self.RemoteCommand('readlink -f %s' % remote_path)\n remote_path = remote_path.strip()\n src_spec, dest_spec = '%s:%s' % (self.name, remote_path), file_path\n cmd = [FLAGS.kubectl, '--kubeconfig=%s' % FLAGS.kubeconfig,\n 'cp', src_spec, dest_spec]\n stdout, stderr, retcode = vm_util.IssueCommand(cmd, raise_on_failure=False)\n if retcode:\n error_text = ('Got non-zero return code (%s) executing %s\\n'\n 'STDOUT: %sSTDERR: %s' %\n (retcode, ' '.join(cmd), stdout, stderr))\n raise errors.VirtualMachine.RemoteCommandError(error_text)\n if copy_to:\n file_name = posixpath.basename(file_path)\n remote_path = remote_path or file_name\n self.RemoteCommand('mv %s %s; chmod 777 %s' %\n (file_name, remote_path, remote_path))\n\n @vm_util.Retry(log_errors=False, poll_interval=1)\n def PrepareVMEnvironment(self):\n super(DebianBasedKubernetesVirtualMachine, self).PrepareVMEnvironment()\n # Don't rely on SSH being installed in Kubernetes containers,\n # so install it and restart the service so that it is ready to go.\n # Although ssh is not required to connect to the container, MPI\n # benchmarks require it.\n self.InstallPackages('ssh')\n self.RemoteCommand('sudo /etc/init.d/ssh restart', ignore_failure=True)\n self.RemoteCommand('mkdir -p ~/.ssh')\n with open(self.ssh_public_key) as f:\n key = f.read()\n self.RemoteCommand('echo \"%s\" 
>> ~/.ssh/authorized_keys' % key)\n self.Install('python')\n\n # Needed for the MKL math library.\n self.InstallPackages('cpio')\n\n # Don't assume the relevant CLI is installed in the Kubernetes environment.\n if FLAGS.container_cluster_cloud == 'GCP':\n self.InstallGcloudCli()\n elif FLAGS.container_cluster_cloud == 'AWS':\n self.InstallAwsCli()\n elif FLAGS.container_cluster_cloud == 'Azure':\n self.InstallAzureCli()\n\n def InstallAwsCli(self):\n \"\"\"Installs the AWS CLI; used for downloading preprovisioned data.\"\"\"\n self.Install('aws_credentials')\n self.Install('awscli')\n\n def InstallAzureCli(self):\n \"\"\"Installs the Azure CLI; used for downloading preprovisioned data.\"\"\"\n self.Install('azure_cli')\n self.Install('azure_credentials')\n\n # TODO(ferneyhough): Consider making this a package.\n def InstallGcloudCli(self):\n \"\"\"Installs the Gcloud CLI; used for downloading preprovisioned data.\"\"\"\n self.InstallPackages('curl')\n # The driver /usr/lib/apt/methods/https is sometimes needed for apt-get.\n self.InstallPackages('apt-transport-https')\n self.RemoteCommand('echo \"deb https://packages.cloud.google.com/apt '\n 'cloud-sdk-$(lsb_release -c -s) main\" | sudo tee -a '\n '/etc/apt/sources.list.d/google-cloud-sdk.list')\n self.RemoteCommand('curl https://packages.cloud.google.com/apt/doc/'\n 'apt-key.gpg | sudo apt-key add -')\n self.RemoteCommand('sudo apt-get update && sudo apt-get install '\n '-y google-cloud-sdk')\n\n def DownloadPreprovisionedData(self, install_path, module_name, filename):\n \"\"\"Downloads a preprovisioned data file.\n\n This function works by looking up the VirtualMachine class which matches\n the cloud we are running on (defined by FLAGS.container_cluster_cloud).\n\n Then we look for a module-level function defined in the same module as\n the VirtualMachine class which generates a string used to download\n preprovisioned data for the given cloud.\n\n Note that this implementation is specific to debian os types.\n Windows support will need to be handled in\n WindowsBasedKubernetesVirtualMachine.\n\n Args:\n install_path: The install path on this VM.\n module_name: Name of the module associated with this data file.\n filename: The name of the file that was downloaded.\n\n Raises:\n NotImplementedError: if this method does not support the specified cloud.\n AttributeError: if the VirtualMachine class does not implement\n GenerateDownloadPreprovisionedDataCommand.\n \"\"\"\n cloud = FLAGS.container_cluster_cloud\n if cloud == 'GCP':\n download_function = (gce_virtual_machine.\n GenerateDownloadPreprovisionedDataCommand)\n elif cloud == 'AWS':\n download_function = (aws_virtual_machine.\n GenerateDownloadPreprovisionedDataCommand)\n elif cloud == 'Azure':\n download_function = (azure_virtual_machine.\n GenerateDownloadPreprovisionedDataCommand)\n else:\n raise NotImplementedError(\n 'Cloud {0} does not support downloading preprovisioned '\n 'data on Kubernetes VMs.'.format(cloud))\n\n self.RemoteCommand(\n download_function(install_path, module_name, filename))\n\n def ShouldDownloadPreprovisionedData(self, module_name, filename):\n \"\"\"Returns whether or not preprovisioned data is available.\"\"\"\n cloud = FLAGS.container_cluster_cloud\n if cloud == 'GCP' and FLAGS.gcp_preprovisioned_data_bucket:\n stat_function = (gce_virtual_machine.\n GenerateStatPreprovisionedDataCommand)\n elif cloud == 'AWS' and FLAGS.aws_preprovisioned_data_bucket:\n stat_function = (aws_virtual_machine.\n GenerateStatPreprovisionedDataCommand)\n elif cloud == 
'Azure' and FLAGS.azure_preprovisioned_data_bucket:\n stat_function = (azure_virtual_machine.\n GenerateStatPreprovisionedDataCommand)\n else:\n return False\n return self.TryRemoteCommand(stat_function(module_name, filename))\n\n\ndef _install_sudo_command():\n \"\"\"Return a bash command that installs sudo and runs tail indefinitely.\n\n This is useful for some docker images that don't have sudo installed.\n\n Returns:\n a sequence of arguments that use bash to install sudo and never run\n tail indefinitely.\n \"\"\"\n # The canonical ubuntu images as well as the nvidia/cuda\n # image do not have sudo installed so install it and configure\n # the sudoers file such that the root user's environment is\n # preserved when running as sudo. Then run tail indefinitely so that\n # the container does not exit.\n container_command = ' && '.join([\n 'apt-get update',\n 'apt-get install -y sudo',\n 'sed -i \\'/env_reset/d\\' /etc/sudoers',\n 'sed -i \\'/secure_path/d\\' /etc/sudoers',\n 'sudo ldconfig',\n 'tail -f /dev/null',\n ])\n return ['bash', '-c', container_command]\n\n\nclass Ubuntu1404BasedKubernetesVirtualMachine(\n DebianBasedKubernetesVirtualMachine, linux_virtual_machine.Ubuntu1404Mixin):\n # All Ubuntu images below are from https://hub.docker.com/_/ubuntu/\n # Note that they do not include all packages that are typically\n # included with Ubuntu. For example, sudo is not installed.\n # KubernetesVirtualMachine takes care of this by installing\n # sudo in the container startup script.\n DEFAULT_IMAGE = 'ubuntu:14.04'\n CONTAINER_COMMAND = _install_sudo_command()\n\n\nclass Ubuntu1604BasedKubernetesVirtualMachine(\n DebianBasedKubernetesVirtualMachine, linux_virtual_machine.Ubuntu1604Mixin):\n DEFAULT_IMAGE = 'ubuntu:16.04'\n CONTAINER_COMMAND = _install_sudo_command()\n\n\nclass Ubuntu1710BasedKubernetesVirtualMachine(\n DebianBasedKubernetesVirtualMachine, linux_virtual_machine.Ubuntu1710Mixin):\n DEFAULT_IMAGE = 'ubuntu:17.10'\n CONTAINER_COMMAND = _install_sudo_command()\n\n\nclass Ubuntu1604Cuda9BasedKubernetesVirtualMachine(\n DebianBasedKubernetesVirtualMachine,\n linux_virtual_machine.Ubuntu1604Cuda9Mixin):\n # Image is from https://hub.docker.com/r/nvidia/cuda/\n DEFAULT_IMAGE = 'nvidia/cuda:9.0-devel-ubuntu16.04'\n CONTAINER_COMMAND = _install_sudo_command()\n",
"path": "perfkitbenchmarker/providers/kubernetes/kubernetes_virtual_machine.py"
}
] | [
{
"content": "# Copyright 2017 PerfKitBenchmarker Authors. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Contains code related to lifecycle management of Kubernetes Pods.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport json\nimport logging\nimport posixpath\n\nfrom perfkitbenchmarker import context\nfrom perfkitbenchmarker import disk\nfrom perfkitbenchmarker import errors\nfrom perfkitbenchmarker import flags\nfrom perfkitbenchmarker import kubernetes_helper\nfrom perfkitbenchmarker import providers\nfrom perfkitbenchmarker import virtual_machine, linux_virtual_machine\nfrom perfkitbenchmarker import vm_util\nfrom perfkitbenchmarker.providers.aws import aws_virtual_machine\nfrom perfkitbenchmarker.providers.azure import azure_virtual_machine\nfrom perfkitbenchmarker.providers.gcp import gce_virtual_machine\nfrom perfkitbenchmarker.providers.kubernetes import kubernetes_disk\nfrom perfkitbenchmarker.vm_util import OUTPUT_STDOUT as STDOUT\nimport six\n\nFLAGS = flags.FLAGS\n\nUBUNTU_IMAGE = 'ubuntu-upstart'\nSELECTOR_PREFIX = 'pkb'\n\n\nclass KubernetesVirtualMachine(virtual_machine.BaseVirtualMachine):\n \"\"\"Object representing a Kubernetes POD.\"\"\"\n CLOUD = providers.KUBERNETES\n DEFAULT_IMAGE = None\n CONTAINER_COMMAND = None\n HOME_DIR = '/root'\n IS_REBOOTABLE = False\n\n def __init__(self, vm_spec):\n \"\"\"Initialize a Kubernetes virtual machine.\n\n Args:\n vm_spec: KubernetesPodSpec object of the vm.\n \"\"\"\n super(KubernetesVirtualMachine, self).__init__(vm_spec)\n self.num_scratch_disks = 0\n self.name = self.name.replace('_', '-')\n self.user_name = FLAGS.username\n self.image = self.image or self.DEFAULT_IMAGE\n self.resource_limits = vm_spec.resource_limits\n self.resource_requests = vm_spec.resource_requests\n\n def GetResourceMetadata(self):\n metadata = super(KubernetesVirtualMachine, self).GetResourceMetadata()\n if self.resource_limits:\n metadata.update({\n 'pod_cpu_limit': self.resource_limits.cpus,\n 'pod_memory_limit_mb': self.resource_limits.memory,\n })\n if self.resource_requests:\n metadata.update({\n 'pod_cpu_request': self.resource_requests.cpus,\n 'pod_memory_request_mb': self.resource_requests.memory,\n })\n return metadata\n\n def _CreateDependencies(self):\n self._CheckPrerequisites()\n self._CreateVolumes()\n\n def _DeleteDependencies(self):\n self._DeleteVolumes()\n\n def _Create(self):\n self._CreatePod()\n self._WaitForPodBootCompletion()\n\n @vm_util.Retry()\n def _PostCreate(self):\n self._GetInternalIp()\n self._ConfigureProxy()\n self._SetupDevicesPaths()\n\n def _Delete(self):\n self._DeletePod()\n\n def _CheckPrerequisites(self):\n \"\"\"Exits if any of the prerequisites is not met.\"\"\"\n if not FLAGS.kubectl:\n raise Exception('Please provide path to kubectl tool using --kubectl '\n 'flag. Exiting.')\n if not FLAGS.kubeconfig:\n raise Exception('Please provide path to kubeconfig using --kubeconfig '\n 'flag. 
Exiting.')\n if self.disk_specs and self.disk_specs[0].disk_type == disk.STANDARD:\n if not FLAGS.ceph_monitors:\n raise Exception('Please provide a list of Ceph Monitors using '\n '--ceph_monitors flag.')\n\n def _CreatePod(self):\n \"\"\"Creates a POD (Docker container with optional volumes).\"\"\"\n create_rc_body = self._BuildPodBody()\n logging.info('About to create a pod with the following configuration:')\n logging.info(create_rc_body)\n kubernetes_helper.CreateResource(create_rc_body)\n\n @vm_util.Retry(poll_interval=10, max_retries=100, log_errors=False)\n def _WaitForPodBootCompletion(self):\n \"\"\"\n Need to wait for the PODs to get up - PODs are created with a little delay.\n \"\"\"\n exists_cmd = [FLAGS.kubectl, '--kubeconfig=%s' % FLAGS.kubeconfig, 'get',\n 'pod', '-o=json', self.name]\n logging.info('Waiting for POD %s', self.name)\n pod_info, _, _ = vm_util.IssueCommand(exists_cmd, suppress_warning=True,\n raise_on_failure=False)\n if pod_info:\n pod_info = json.loads(pod_info)\n containers = pod_info['spec']['containers']\n if len(containers) == 1:\n pod_status = pod_info['status']['phase']\n if (containers[0]['name'].startswith(self.name)\n and pod_status == 'Running'):\n logging.info('POD is up and running.')\n return\n raise Exception('POD %s is not running. Retrying to check status.' %\n self.name)\n\n def _DeletePod(self):\n \"\"\"Deletes a POD.\"\"\"\n delete_pod = [FLAGS.kubectl, '--kubeconfig=%s' % FLAGS.kubeconfig,\n 'delete', 'pod', self.name]\n output = vm_util.IssueCommand(delete_pod, raise_on_failure=False)\n logging.info(output[STDOUT].rstrip())\n\n @vm_util.Retry(poll_interval=10, max_retries=20)\n def _Exists(self):\n \"\"\"POD should have been already created but this is a double check.\"\"\"\n exists_cmd = [FLAGS.kubectl, '--kubeconfig=%s' % FLAGS.kubeconfig, 'get',\n 'pod', '-o=json', self.name]\n pod_info, _, _ = vm_util.IssueCommand(\n exists_cmd, suppress_warning=True, raise_on_failure=False)\n if pod_info:\n return True\n return False\n\n def _CreateVolumes(self):\n \"\"\"\n Creates volumes for scratch disks. These volumes have to be created\n BEFORE containers creation because Kubernetes doesn't allow to attach\n volume to currently running containers.\n \"\"\"\n self.scratch_disks = kubernetes_disk.CreateDisks(self.disk_specs, self.name)\n\n @vm_util.Retry(poll_interval=10, max_retries=20, log_errors=False)\n def _DeleteVolumes(self):\n \"\"\"Deletes volumes.\"\"\"\n for scratch_disk in self.scratch_disks[:]:\n scratch_disk.Delete()\n self.scratch_disks.remove(scratch_disk)\n\n def DeleteScratchDisks(self):\n pass\n\n def _GetInternalIp(self):\n \"\"\"Gets the POD's internal ip address.\"\"\"\n pod_ip = kubernetes_helper.Get(\n 'pods', self.name, '', '.status.podIP')\n\n if not pod_ip:\n raise Exception('Internal POD IP address not found. Retrying.')\n\n self.internal_ip = pod_ip\n self.ip_address = pod_ip\n\n def _ConfigureProxy(self):\n \"\"\"\n In Docker containers environment variables from /etc/environment\n are not sourced - this results in connection problems when running\n behind proxy. Prepending proxy environment variables to bashrc\n solves the problem. 
Note: APPENDING to bashrc will not work because\n the script exits when it is NOT executed in interactive shell.\n \"\"\"\n\n if FLAGS.http_proxy:\n http_proxy = 'sed -i \\'1i export http_proxy=%s\\' /etc/bash.bashrc'\n self.RemoteCommand(http_proxy % FLAGS.http_proxy)\n if FLAGS.https_proxy:\n https_proxy = 'sed -i \\'1i export https_proxy=%s\\' /etc/bash.bashrc'\n self.RemoteCommand(https_proxy % FLAGS.http_proxy)\n if FLAGS.ftp_proxy:\n ftp_proxy = 'sed -i \\'1i export ftp_proxy=%s\\' /etc/bash.bashrc'\n self.RemoteCommand(ftp_proxy % FLAGS.ftp_proxy)\n\n def _SetupDevicesPaths(self):\n \"\"\"Sets the path to each scratch disk device.\"\"\"\n for scratch_disk in self.scratch_disks:\n scratch_disk.SetDevicePath(self)\n\n def _BuildPodBody(self):\n \"\"\"\n Builds a JSON which will be passed as a body of POST request\n to Kuberneres API in order to create a POD.\n \"\"\"\n\n container = self._BuildContainerBody()\n volumes = self._BuildVolumesBody()\n\n template = {\n 'kind': 'Pod',\n 'apiVersion': 'v1',\n 'metadata': {\n 'name': self.name,\n 'labels': {\n SELECTOR_PREFIX: self.name\n }\n },\n 'spec': {\n 'volumes': volumes,\n 'containers': [container],\n 'dnsPolicy': 'ClusterFirst',\n }\n }\n if FLAGS.kubernetes_anti_affinity:\n template['spec']['affinity'] = {\n 'podAntiAffinity': {\n 'requiredDuringSchedulingIgnoredDuringExecution': [{\n 'labelSelector': {\n 'matchExpressions': [{\n 'key': 'pkb_anti_affinity',\n 'operator': 'In',\n 'values': [''],\n }],\n },\n 'topologyKey': 'kubernetes.io/hostname',\n }],\n },\n }\n template['metadata']['labels']['pkb_anti_affinity'] = ''\n\n return json.dumps(template)\n\n def _BuildVolumesBody(self):\n \"\"\"Constructs volumes-related part of POST request to create POD.\"\"\"\n volumes = []\n\n for scratch_disk in self.scratch_disks:\n scratch_disk.AttachVolumeInfo(volumes)\n\n return volumes\n\n def _BuildContainerBody(self):\n \"\"\"Constructs containers-related part of POST request to create POD.\"\"\"\n registry = getattr(context.GetThreadBenchmarkSpec(), 'registry', None)\n if (not FLAGS.static_container_image and\n registry is not None):\n image = registry.GetFullRegistryTag(self.image)\n else:\n image = self.image\n container = {\n 'image': image,\n 'name': self.name,\n 'workingDir': self.HOME_DIR,\n 'securityContext': {\n 'privileged': FLAGS.docker_in_privileged_mode\n },\n 'volumeMounts': [\n ]\n }\n\n for scratch_disk in self.scratch_disks:\n scratch_disk.AttachVolumeMountInfo(container['volumeMounts'])\n\n resource_body = self._BuildResourceBody()\n if resource_body:\n container['resources'] = resource_body\n\n if self.CONTAINER_COMMAND:\n container['command'] = self.CONTAINER_COMMAND\n\n return container\n\n def _BuildResourceBody(self):\n \"\"\"Constructs a dictionary that specifies resource limits and requests.\n\n The syntax for including GPUs is specific to GKE and is likely to\n change in the future.\n See https://kubernetes.io/docs/tasks/manage-gpus/scheduling-gpus\n\n Returns:\n kubernetes pod resource body containing pod limits and requests.\n \"\"\"\n resources = {\n 'limits': {},\n 'requests': {},\n }\n\n if self.resource_requests:\n resources['requests'].update({\n 'cpu': str(self.resource_requests.cpus),\n 'memory': '{0}Mi'.format(self.resource_requests.memory),\n })\n\n if self.resource_limits:\n resources['limits'].update({\n 'cpu': str(self.resource_limits.cpus),\n 'memory': '{0}Mi'.format(self.resource_limits.memory),\n })\n\n if self.gpu_count:\n gpu_dict = {\n 'nvidia.com/gpu': str(self.gpu_count)\n }\n 
resources['limits'].update(gpu_dict)\n resources['requests'].update(gpu_dict)\n\n result_with_empty_values_removed = ({\n k: v for k, v in six.iteritems(resources) if v\n })\n return result_with_empty_values_removed\n\n\nclass DebianBasedKubernetesVirtualMachine(KubernetesVirtualMachine,\n linux_virtual_machine.DebianMixin):\n DEFAULT_IMAGE = UBUNTU_IMAGE\n\n def RemoteHostCommandWithReturnCode(self, command,\n should_log=False, retries=None,\n ignore_failure=False, login_shell=False,\n suppress_warning=False, timeout=None):\n \"\"\"Runs a command in the Kubernetes container.\"\"\"\n cmd = [FLAGS.kubectl, '--kubeconfig=%s' % FLAGS.kubeconfig, 'exec', '-i',\n self.name, '--', '/bin/bash', '-c', command]\n stdout, stderr, retcode = vm_util.IssueCommand(\n cmd, force_info_log=should_log,\n suppress_warning=suppress_warning, timeout=timeout,\n raise_on_failure=False)\n if not ignore_failure and retcode:\n error_text = ('Got non-zero return code (%s) executing %s\\n'\n 'Full command: %s\\nSTDOUT: %sSTDERR: %s' %\n (retcode, command, ' '.join(cmd),\n stdout, stderr))\n raise errors.VirtualMachine.RemoteCommandError(error_text)\n return stdout, stderr, retcode\n\n def MoveHostFile(self, target, source_path, remote_path=''):\n \"\"\"Copies a file from one VM to a target VM.\n\n Args:\n target: The target BaseVirtualMachine object.\n source_path: The location of the file on the REMOTE machine.\n remote_path: The destination of the file on the TARGET machine, default\n is the home directory.\n \"\"\"\n file_name = vm_util.PrependTempDir(posixpath.basename(source_path))\n self.RemoteHostCopy(file_name, source_path, copy_to=False)\n target.RemoteHostCopy(file_name, remote_path)\n\n def RemoteHostCopy(self, file_path, remote_path='', copy_to=True):\n \"\"\"Copies a file to or from the VM.\n\n Args:\n file_path: Local path to file.\n remote_path: Optional path of where to copy file on remote host.\n copy_to: True to copy to vm, False to copy from vm.\n\n Raises:\n RemoteCommandError: If there was a problem copying the file.\n \"\"\"\n if copy_to:\n file_name = posixpath.basename(file_path)\n src_spec, dest_spec = file_path, '%s:%s' % (self.name, file_name)\n else:\n remote_path, _ = self.RemoteCommand('readlink -f %s' % remote_path)\n remote_path = remote_path.strip()\n src_spec, dest_spec = '%s:%s' % (self.name, remote_path), file_path\n cmd = [FLAGS.kubectl, '--kubeconfig=%s' % FLAGS.kubeconfig,\n 'cp', src_spec, dest_spec]\n stdout, stderr, retcode = vm_util.IssueCommand(cmd, raise_on_failure=False)\n if retcode:\n error_text = ('Got non-zero return code (%s) executing %s\\n'\n 'STDOUT: %sSTDERR: %s' %\n (retcode, ' '.join(cmd), stdout, stderr))\n raise errors.VirtualMachine.RemoteCommandError(error_text)\n if copy_to:\n file_name = posixpath.basename(file_path)\n remote_path = remote_path or file_name\n self.RemoteCommand('mv %s %s; chmod 777 %s' %\n (file_name, remote_path, remote_path))\n\n @vm_util.Retry(log_errors=False, poll_interval=1)\n def PrepareVMEnvironment(self):\n super(DebianBasedKubernetesVirtualMachine, self).PrepareVMEnvironment()\n # Don't rely on SSH being installed in Kubernetes containers,\n # so install it and restart the service so that it is ready to go.\n # Although ssh is not required to connect to the container, MPI\n # benchmarks require it.\n self.InstallPackages('ssh')\n self.RemoteCommand('sudo /etc/init.d/ssh restart', ignore_failure=True)\n self.RemoteCommand('mkdir -p ~/.ssh')\n with open(self.ssh_public_key) as f:\n key = f.read()\n self.RemoteCommand('echo \"%s\" 
>> ~/.ssh/authorized_keys' % key)\n self.Install('python')\n\n # Needed for the MKL math library.\n self.InstallPackages('cpio')\n\n # Don't assume the relevant CLI is installed in the Kubernetes environment.\n if FLAGS.container_cluster_cloud == 'GCP':\n self.InstallGcloudCli()\n elif FLAGS.container_cluster_cloud == 'AWS':\n self.InstallAwsCli()\n elif FLAGS.container_cluster_cloud == 'Azure':\n self.InstallAzureCli()\n\n def InstallAwsCli(self):\n \"\"\"Installs the AWS CLI; used for downloading preprovisioned data.\"\"\"\n self.Install('aws_credentials')\n self.Install('awscli')\n\n def InstallAzureCli(self):\n \"\"\"Installs the Azure CLI; used for downloading preprovisioned data.\"\"\"\n self.Install('azure_cli')\n self.Install('azure_credentials')\n\n # TODO(ferneyhough): Consider making this a package.\n def InstallGcloudCli(self):\n \"\"\"Installs the Gcloud CLI; used for downloading preprovisioned data.\"\"\"\n self.InstallPackages('curl')\n # The driver /usr/lib/apt/methods/https is sometimes needed for apt-get.\n self.InstallPackages('apt-transport-https')\n self.RemoteCommand('echo \"deb https://packages.cloud.google.com/apt '\n 'cloud-sdk-$(lsb_release -c -s) main\" | sudo tee -a '\n '/etc/apt/sources.list.d/google-cloud-sdk.list')\n self.RemoteCommand('curl https://packages.cloud.google.com/apt/doc/'\n 'apt-key.gpg | sudo apt-key add -')\n self.RemoteCommand('sudo apt-get update && sudo apt-get install '\n '-y google-cloud-sdk')\n\n def DownloadPreprovisionedData(self, install_path, module_name, filename):\n \"\"\"Downloads a preprovisioned data file.\n\n This function works by looking up the VirtualMachine class which matches\n the cloud we are running on (defined by FLAGS.container_cluster_cloud).\n\n Then we look for a module-level function defined in the same module as\n the VirtualMachine class which generates a string used to download\n preprovisioned data for the given cloud.\n\n Note that this implementation is specific to debian os types.\n Windows support will need to be handled in\n WindowsBasedKubernetesVirtualMachine.\n\n Args:\n install_path: The install path on this VM.\n module_name: Name of the module associated with this data file.\n filename: The name of the file that was downloaded.\n\n Raises:\n NotImplementedError: if this method does not support the specified cloud.\n AttributeError: if the VirtualMachine class does not implement\n GenerateDownloadPreprovisionedDataCommand.\n \"\"\"\n cloud = FLAGS.container_cluster_cloud\n if cloud == 'GCP':\n download_function = (gce_virtual_machine.\n GenerateDownloadPreprovisionedDataCommand)\n elif cloud == 'AWS':\n download_function = (aws_virtual_machine.\n GenerateDownloadPreprovisionedDataCommand)\n elif cloud == 'Azure':\n download_function = (azure_virtual_machine.\n GenerateDownloadPreprovisionedDataCommand)\n else:\n raise NotImplementedError(\n 'Cloud {0} does not support downloading preprovisioned '\n 'data on Kubernetes VMs.'.format(cloud))\n\n self.RemoteCommand(\n download_function(install_path, module_name, filename))\n\n def ShouldDownloadPreprovisionedData(self, module_name, filename):\n \"\"\"Returns whether or not preprovisioned data is available.\"\"\"\n cloud = FLAGS.container_cluster_cloud\n if cloud == 'GCP' and FLAGS.gcp_preprovisioned_data_bucket:\n stat_function = (gce_virtual_machine.\n GenerateStatPreprovisionedDataCommand)\n elif cloud == 'AWS' and FLAGS.aws_preprovisioned_data_bucket:\n stat_function = (aws_virtual_machine.\n GenerateStatPreprovisionedDataCommand)\n elif cloud == 
'Azure' and FLAGS.azure_preprovisioned_data_bucket:\n stat_function = (azure_virtual_machine.\n GenerateStatPreprovisionedDataCommand)\n else:\n return False\n return self.TryRemoteCommand(stat_function(module_name, filename))\n\n\ndef _install_sudo_command():\n \"\"\"Return a bash command that installs sudo and runs tail indefinitely.\n\n This is useful for some docker images that don't have sudo installed.\n\n Returns:\n a sequence of arguments that use bash to install sudo and never run\n tail indefinitely.\n \"\"\"\n # The canonical ubuntu images as well as the nvidia/cuda\n # image do not have sudo installed so install it and configure\n # the sudoers file such that the root user's environment is\n # preserved when running as sudo. Then run tail indefinitely so that\n # the container does not exit.\n container_command = ' && '.join([\n 'apt-get update',\n 'apt-get install -y sudo',\n 'sed -i \\'/env_reset/d\\' /etc/sudoers',\n 'sed -i \\'/secure_path/d\\' /etc/sudoers',\n 'sudo ldconfig',\n 'tail -f /dev/null',\n ])\n return ['bash', '-c', container_command]\n\n\nclass Ubuntu1404BasedKubernetesVirtualMachine(\n DebianBasedKubernetesVirtualMachine, linux_virtual_machine.Ubuntu1404Mixin):\n # All Ubuntu images below are from https://hub.docker.com/_/ubuntu/\n # Note that they do not include all packages that are typically\n # included with Ubuntu. For example, sudo is not installed.\n # KubernetesVirtualMachine takes care of this by installing\n # sudo in the container startup script.\n DEFAULT_IMAGE = 'ubuntu:14.04'\n CONTAINER_COMMAND = _install_sudo_command()\n\n\nclass Ubuntu1604BasedKubernetesVirtualMachine(\n DebianBasedKubernetesVirtualMachine, linux_virtual_machine.Ubuntu1604Mixin):\n DEFAULT_IMAGE = 'ubuntu:16.04'\n CONTAINER_COMMAND = _install_sudo_command()\n\n\nclass Ubuntu1710BasedKubernetesVirtualMachine(\n DebianBasedKubernetesVirtualMachine, linux_virtual_machine.Ubuntu1710Mixin):\n DEFAULT_IMAGE = 'ubuntu:17.10'\n CONTAINER_COMMAND = _install_sudo_command()\n\n\nclass Ubuntu1604Cuda9BasedKubernetesVirtualMachine(\n DebianBasedKubernetesVirtualMachine,\n linux_virtual_machine.Ubuntu1604Cuda9Mixin):\n # Image is from https://hub.docker.com/r/nvidia/cuda/\n DEFAULT_IMAGE = 'nvidia/cuda:9.0-devel-ubuntu16.04'\n CONTAINER_COMMAND = _install_sudo_command()\n",
"path": "perfkitbenchmarker/providers/kubernetes/kubernetes_virtual_machine.py"
}
] | diff --git a/perfkitbenchmarker/providers/kubernetes/kubernetes_virtual_machine.py b/perfkitbenchmarker/providers/kubernetes/kubernetes_virtual_machine.py
index ea5171a86b..5a8ee7f8d0 100644
--- a/perfkitbenchmarker/providers/kubernetes/kubernetes_virtual_machine.py
+++ b/perfkitbenchmarker/providers/kubernetes/kubernetes_virtual_machine.py
@@ -186,6 +186,7 @@ def _GetInternalIp(self):
raise Exception('Internal POD IP address not found. Retrying.')
self.internal_ip = pod_ip
+ self.ip_address = pod_ip
def _ConfigureProxy(self):
"""
|
scikit-image__scikit-image-6502 | `peak_local_max` excludes peaks at border despite `exclude_border=0`
## Description
## Way to reproduce
The Python demo below needs a 2D array contained in `data.npy`, which is stored in [data.zip](https://github.com/scikit-image/scikit-image/files/8804871/data.zip).
Within that script I run `peak_local_max` three times. The respective settings and the resulting peaks are indicated in these plots:

(The underlying data was actually produced by tiling a smaller array three times along the vertical axis.)
From my perspective there is a sixth peak missing in the first plot. I do get that peak if I reduce `min_distance` to 30:

but I don't understand why it is removed with `min_distance=63`. The six peaks are clearly separated by more than 100 units.
```python
import numpy as np
from skimage.feature.peak import peak_local_max
import matplotlib.pyplot as plt
y = np.load("data.npy")
fig, axs = plt.subplots( nrows=3 )
for i, ax in enumerate(axs):
pcolor = ax.pcolormesh( y.T )
cbar = plt.colorbar( pcolor, ax=ax )
if i < 2:
ax.tick_params( axis='x', which='both', bottom=False, top=False, labelbottom=False)
if i==0:
peaks = peak_local_max( y, min_distance=63, threshold_abs=-0.039, exclude_border=0 )
ax.set_title('min_distance = 63, exclude_border = 0')
elif i==1:
peaks = peak_local_max( y, min_distance=63, threshold_abs=-0.039, exclude_border=True )
ax.set_title('min_distance = exclude_border = 63')
elif i==2:
peaks = peak_local_max( y, min_distance=1, threshold_abs=-0.039, exclude_border=True )
ax.set_title('min_distance = exclude_border = 1')
else:
raise NotImplementedError
axs[i].plot( peaks[:,0], peaks[:,1], marker='x', linewidth=0, color='w' )
fig.show()
```
## Version information
```
3.10.4 | packaged by conda-forge | (main, Mar 24 2022, 17:38:57) [GCC 10.3.0]
Linux-5.13.0-44-generic-x86_64-with-glibc2.31
scikit-image version: 0.19.2
numpy version: 1.21.6
```
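For what it's worth, `peak_local_max` keeps only pixels where `image == ndi.maximum_filter(image, footprint=footprint, mode='constant')`, and `mode='constant'` pads the border with `cval=0.0`. For data that is negative everywhere, as here, the padding dominates, so a maximum within `min_distance` of the border can never pass that test. A minimal 1-D sketch (the toy array is made up for illustration):
```python
import numpy as np
import scipy.ndimage as ndi

# All-negative toy signal whose true maximum sits at the border.
y = np.array([-1.0, -2.0, -3.0, -4.0, -5.0])

# mode='constant' pads with cval=0.0, which exceeds every negative sample,
# so the border pixel no longer equals the filtered maximum computed there.
print(ndi.maximum_filter(y, size=3, mode='constant'))       # [ 0. -1. -2. -3.  0.]
print(y == ndi.maximum_filter(y, size=3, mode='constant'))  # all False: border peak lost

# mode='nearest' repeats the edge value instead and keeps the border peak.
print(y == ndi.maximum_filter(y, size=3, mode='nearest'))   # True at index 0
```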
| [
{
"content": "from warnings import warn\n\nimport numpy as np\nimport scipy.ndimage as ndi\n\nfrom .. import measure\nfrom .._shared.coord import ensure_spacing\n\n\ndef _get_high_intensity_peaks(image, mask, num_peaks, min_distance, p_norm):\n \"\"\"\n Return the highest intensity peak coordinates.\n \"\"\"\n # get coordinates of peaks\n coord = np.nonzero(mask)\n intensities = image[coord]\n # Highest peak first\n idx_maxsort = np.argsort(-intensities)\n coord = np.transpose(coord)[idx_maxsort]\n\n if np.isfinite(num_peaks):\n max_out = int(num_peaks)\n else:\n max_out = None\n\n coord = ensure_spacing(coord, spacing=min_distance, p_norm=p_norm,\n max_out=max_out)\n\n if len(coord) > num_peaks:\n coord = coord[:num_peaks]\n\n return coord\n\n\ndef _get_peak_mask(image, footprint, threshold, mask=None):\n \"\"\"\n Return the mask containing all peak candidates above thresholds.\n \"\"\"\n if footprint.size == 1 or image.size == 1:\n return image > threshold\n\n image_max = ndi.maximum_filter(image, footprint=footprint,\n mode='constant')\n\n out = image == image_max\n\n # no peak for a trivial image\n image_is_trivial = np.all(out) if mask is None else np.all(out[mask])\n if image_is_trivial:\n out[:] = False\n if mask is not None:\n # isolated pixels in masked area are returned as peaks\n isolated_px = np.logical_xor(mask, ndi.binary_opening(mask))\n out[isolated_px] = True\n\n out &= image > threshold\n return out\n\n\ndef _exclude_border(label, border_width):\n \"\"\"Set label border values to 0.\n\n \"\"\"\n # zero out label borders\n for i, width in enumerate(border_width):\n if width == 0:\n continue\n label[(slice(None),) * i + (slice(None, width),)] = 0\n label[(slice(None),) * i + (slice(-width, None),)] = 0\n return label\n\n\ndef _get_threshold(image, threshold_abs, threshold_rel):\n \"\"\"Return the threshold value according to an absolute and a relative\n value.\n\n \"\"\"\n threshold = threshold_abs if threshold_abs is not None else image.min()\n\n if threshold_rel is not None:\n threshold = max(threshold, threshold_rel * image.max())\n\n return threshold\n\n\ndef _get_excluded_border_width(image, min_distance, exclude_border):\n \"\"\"Return border_width values relative to a min_distance if requested.\n\n \"\"\"\n\n if isinstance(exclude_border, bool):\n border_width = (min_distance if exclude_border else 0,) * image.ndim\n elif isinstance(exclude_border, int):\n if exclude_border < 0:\n raise ValueError(\"`exclude_border` cannot be a negative value\")\n border_width = (exclude_border,) * image.ndim\n elif isinstance(exclude_border, tuple):\n if len(exclude_border) != image.ndim:\n raise ValueError(\n \"`exclude_border` should have the same length as the \"\n \"dimensionality of the image.\")\n for exclude in exclude_border:\n if not isinstance(exclude, int):\n raise ValueError(\n \"`exclude_border`, when expressed as a tuple, must only \"\n \"contain ints.\"\n )\n if exclude < 0:\n raise ValueError(\n \"`exclude_border` can not be a negative value\")\n border_width = exclude_border\n else:\n raise TypeError(\n \"`exclude_border` must be bool, int, or tuple with the same \"\n \"length as the dimensionality of the image.\")\n\n return border_width\n\n\ndef peak_local_max(image, min_distance=1, threshold_abs=None,\n threshold_rel=None, exclude_border=True,\n num_peaks=np.inf, footprint=None, labels=None,\n num_peaks_per_label=np.inf, p_norm=np.inf):\n \"\"\"Find peaks in an image as coordinate list.\n\n Peaks are the local maxima in a region of `2 * min_distance + 1`\n (i.e. 
peaks are separated by at least `min_distance`).\n\n If both `threshold_abs` and `threshold_rel` are provided, the maximum\n of the two is chosen as the minimum intensity threshold of peaks.\n\n .. versionchanged:: 0.18\n Prior to version 0.18, peaks of the same height within a radius of\n `min_distance` were all returned, but this could cause unexpected\n behaviour. From 0.18 onwards, an arbitrary peak within the region is\n returned. See issue gh-2592.\n\n Parameters\n ----------\n image : ndarray\n Input image.\n min_distance : int, optional\n The minimal allowed distance separating peaks. To find the\n maximum number of peaks, use `min_distance=1`.\n threshold_abs : float or None, optional\n Minimum intensity of peaks. By default, the absolute threshold is\n the minimum intensity of the image.\n threshold_rel : float or None, optional\n Minimum intensity of peaks, calculated as\n ``max(image) * threshold_rel``.\n exclude_border : int, tuple of ints, or bool, optional\n If positive integer, `exclude_border` excludes peaks from within\n `exclude_border`-pixels of the border of the image.\n If tuple of non-negative ints, the length of the tuple must match the\n input array's dimensionality. Each element of the tuple will exclude\n peaks from within `exclude_border`-pixels of the border of the image\n along that dimension.\n If True, takes the `min_distance` parameter as value.\n If zero or False, peaks are identified regardless of their distance\n from the border.\n num_peaks : int, optional\n Maximum number of peaks. When the number of peaks exceeds `num_peaks`,\n return `num_peaks` peaks based on highest peak intensity.\n footprint : ndarray of bools, optional\n If provided, `footprint == 1` represents the local region within which\n to search for peaks at every point in `image`.\n labels : ndarray of ints, optional\n If provided, each unique region `labels == value` represents a unique\n region to search for peaks. Zero is reserved for background.\n num_peaks_per_label : int, optional\n Maximum number of peaks for each label.\n p_norm : float\n Which Minkowski p-norm to use. Should be in the range [1, inf].\n A finite large p may cause a ValueError if overflow can occur.\n ``inf`` corresponds to the Chebyshev distance and 2 to the\n Euclidean distance.\n\n Returns\n -------\n output : ndarray\n The coordinates of the peaks.\n\n Notes\n -----\n The peak local maximum function returns the coordinates of local peaks\n (maxima) in an image. Internally, a maximum filter is used for finding\n local maxima. This operation dilates the original image. After comparison\n of the dilated and original images, this function returns the coordinates\n of the peaks where the dilated image equals the original image.\n\n See also\n --------\n skimage.feature.corner_peaks\n\n Examples\n --------\n >>> img1 = np.zeros((7, 7))\n >>> img1[3, 4] = 1\n >>> img1[3, 2] = 1.5\n >>> img1\n array([[0. , 0. , 0. , 0. , 0. , 0. , 0. ],\n [0. , 0. , 0. , 0. , 0. , 0. , 0. ],\n [0. , 0. , 0. , 0. , 0. , 0. , 0. ],\n [0. , 0. , 1.5, 0. , 1. , 0. , 0. ],\n [0. , 0. , 0. , 0. , 0. , 0. , 0. ],\n [0. , 0. , 0. , 0. , 0. , 0. , 0. ],\n [0. , 0. , 0. , 0. , 0. , 0. , 0. 
]])\n\n >>> peak_local_max(img1, min_distance=1)\n array([[3, 2],\n [3, 4]])\n\n >>> peak_local_max(img1, min_distance=2)\n array([[3, 2]])\n\n >>> img2 = np.zeros((20, 20, 20))\n >>> img2[10, 10, 10] = 1\n >>> img2[15, 15, 15] = 1\n >>> peak_idx = peak_local_max(img2, exclude_border=0)\n >>> peak_idx\n array([[10, 10, 10],\n [15, 15, 15]])\n\n >>> peak_mask = np.zeros_like(img2, dtype=bool)\n >>> peak_mask[tuple(peak_idx.T)] = True\n >>> np.argwhere(peak_mask)\n array([[10, 10, 10],\n [15, 15, 15]])\n\n \"\"\"\n if (footprint is None or footprint.size == 1) and min_distance < 1:\n warn(\"When min_distance < 1, peak_local_max acts as finding \"\n \"image > max(threshold_abs, threshold_rel * max(image)).\",\n RuntimeWarning, stacklevel=2)\n\n border_width = _get_excluded_border_width(image, min_distance,\n exclude_border)\n\n threshold = _get_threshold(image, threshold_abs, threshold_rel)\n\n if footprint is None:\n size = 2 * min_distance + 1\n footprint = np.ones((size, ) * image.ndim, dtype=bool)\n else:\n footprint = np.asarray(footprint)\n\n if labels is None:\n # Non maximum filter\n mask = _get_peak_mask(image, footprint, threshold)\n\n mask = _exclude_border(mask, border_width)\n\n # Select highest intensities (num_peaks)\n coordinates = _get_high_intensity_peaks(image, mask,\n num_peaks,\n min_distance, p_norm)\n\n else:\n _labels = _exclude_border(labels.astype(int, casting=\"safe\"),\n border_width)\n\n if np.issubdtype(image.dtype, np.floating):\n bg_val = np.finfo(image.dtype).min\n else:\n bg_val = np.iinfo(image.dtype).min\n\n # For each label, extract a smaller image enclosing the object of\n # interest, identify num_peaks_per_label peaks\n labels_peak_coord = []\n\n for label_idx, roi in enumerate(ndi.find_objects(_labels)):\n\n if roi is None:\n continue\n\n # Get roi mask\n label_mask = labels[roi] == label_idx + 1\n # Extract image roi\n img_object = image[roi].copy()\n # Ensure masked values don't affect roi's local peaks\n img_object[np.logical_not(label_mask)] = bg_val\n\n mask = _get_peak_mask(img_object, footprint, threshold, label_mask)\n\n coordinates = _get_high_intensity_peaks(img_object, mask,\n num_peaks_per_label,\n min_distance,\n p_norm)\n\n # transform coordinates in global image indices space\n for idx, s in enumerate(roi):\n coordinates[:, idx] += s.start\n\n labels_peak_coord.append(coordinates)\n\n if labels_peak_coord:\n coordinates = np.vstack(labels_peak_coord)\n else:\n coordinates = np.empty((0, 2), dtype=int)\n\n if len(coordinates) > num_peaks:\n out = np.zeros_like(image, dtype=bool)\n out[tuple(coordinates.T)] = True\n coordinates = _get_high_intensity_peaks(image, out,\n num_peaks,\n min_distance,\n p_norm)\n\n return coordinates\n\n\ndef _prominent_peaks(image, min_xdistance=1, min_ydistance=1,\n threshold=None, num_peaks=np.inf):\n \"\"\"Return peaks with non-maximum suppression.\n\n Identifies most prominent features separated by certain distances.\n Non-maximum suppression with different sizes is applied separately\n in the first and second dimension of the image to identify peaks.\n\n Parameters\n ----------\n image : (M, N) ndarray\n Input image.\n min_xdistance : int\n Minimum distance separating features in the x dimension.\n min_ydistance : int\n Minimum distance separating features in the y dimension.\n threshold : float\n Minimum intensity of peaks. Default is `0.5 * max(image)`.\n num_peaks : int\n Maximum number of peaks. 
When the number of peaks exceeds `num_peaks`,\n return `num_peaks` coordinates based on peak intensity.\n\n Returns\n -------\n intensity, xcoords, ycoords : tuple of array\n Peak intensity values, x and y indices.\n \"\"\"\n\n img = image.copy()\n rows, cols = img.shape\n\n if threshold is None:\n threshold = 0.5 * np.max(img)\n\n ycoords_size = 2 * min_ydistance + 1\n xcoords_size = 2 * min_xdistance + 1\n img_max = ndi.maximum_filter1d(img, size=ycoords_size, axis=0,\n mode='constant', cval=0)\n img_max = ndi.maximum_filter1d(img_max, size=xcoords_size, axis=1,\n mode='constant', cval=0)\n mask = (img == img_max)\n img *= mask\n img_t = img > threshold\n\n label_img = measure.label(img_t)\n props = measure.regionprops(label_img, img_max)\n\n # Sort the list of peaks by intensity, not left-right, so larger peaks\n # in Hough space cannot be arbitrarily suppressed by smaller neighbors\n props = sorted(props, key=lambda x: x.intensity_max)[::-1]\n coords = np.array([np.round(p.centroid) for p in props], dtype=int)\n\n img_peaks = []\n ycoords_peaks = []\n xcoords_peaks = []\n\n # relative coordinate grid for local neighborhood suppression\n ycoords_ext, xcoords_ext = np.mgrid[-min_ydistance:min_ydistance + 1,\n -min_xdistance:min_xdistance + 1]\n\n for ycoords_idx, xcoords_idx in coords:\n accum = img_max[ycoords_idx, xcoords_idx]\n if accum > threshold:\n # absolute coordinate grid for local neighborhood suppression\n ycoords_nh = ycoords_idx + ycoords_ext\n xcoords_nh = xcoords_idx + xcoords_ext\n\n # no reflection for distance neighborhood\n ycoords_in = np.logical_and(ycoords_nh > 0, ycoords_nh < rows)\n ycoords_nh = ycoords_nh[ycoords_in]\n xcoords_nh = xcoords_nh[ycoords_in]\n\n # reflect xcoords and assume xcoords are continuous,\n # e.g. for angles:\n # (..., 88, 89, -90, -89, ..., 89, -90, -89, ...)\n xcoords_low = xcoords_nh < 0\n ycoords_nh[xcoords_low] = rows - ycoords_nh[xcoords_low]\n xcoords_nh[xcoords_low] += cols\n xcoords_high = xcoords_nh >= cols\n ycoords_nh[xcoords_high] = rows - ycoords_nh[xcoords_high]\n xcoords_nh[xcoords_high] -= cols\n\n # suppress neighborhood\n img_max[ycoords_nh, xcoords_nh] = 0\n\n # add current feature to peaks\n img_peaks.append(accum)\n ycoords_peaks.append(ycoords_idx)\n xcoords_peaks.append(xcoords_idx)\n\n img_peaks = np.array(img_peaks)\n ycoords_peaks = np.array(ycoords_peaks)\n xcoords_peaks = np.array(xcoords_peaks)\n\n if num_peaks < len(img_peaks):\n idx_maxsort = np.argsort(img_peaks)[::-1][:num_peaks]\n img_peaks = img_peaks[idx_maxsort]\n ycoords_peaks = ycoords_peaks[idx_maxsort]\n xcoords_peaks = xcoords_peaks[idx_maxsort]\n\n return img_peaks, xcoords_peaks, ycoords_peaks\n",
"path": "skimage/feature/peak.py"
}
] | [
{
"content": "from warnings import warn\n\nimport numpy as np\nimport scipy.ndimage as ndi\n\nfrom .. import measure\nfrom .._shared.coord import ensure_spacing\n\n\ndef _get_high_intensity_peaks(image, mask, num_peaks, min_distance, p_norm):\n \"\"\"\n Return the highest intensity peak coordinates.\n \"\"\"\n # get coordinates of peaks\n coord = np.nonzero(mask)\n intensities = image[coord]\n # Highest peak first\n idx_maxsort = np.argsort(-intensities)\n coord = np.transpose(coord)[idx_maxsort]\n\n if np.isfinite(num_peaks):\n max_out = int(num_peaks)\n else:\n max_out = None\n\n coord = ensure_spacing(coord, spacing=min_distance, p_norm=p_norm,\n max_out=max_out)\n\n if len(coord) > num_peaks:\n coord = coord[:num_peaks]\n\n return coord\n\n\ndef _get_peak_mask(image, footprint, threshold, mask=None):\n \"\"\"\n Return the mask containing all peak candidates above thresholds.\n \"\"\"\n if footprint.size == 1 or image.size == 1:\n return image > threshold\n\n image_max = ndi.maximum_filter(image, footprint=footprint,\n mode='nearest')\n\n out = image == image_max\n\n # no peak for a trivial image\n image_is_trivial = np.all(out) if mask is None else np.all(out[mask])\n if image_is_trivial:\n out[:] = False\n if mask is not None:\n # isolated pixels in masked area are returned as peaks\n isolated_px = np.logical_xor(mask, ndi.binary_opening(mask))\n out[isolated_px] = True\n\n out &= image > threshold\n return out\n\n\ndef _exclude_border(label, border_width):\n \"\"\"Set label border values to 0.\n\n \"\"\"\n # zero out label borders\n for i, width in enumerate(border_width):\n if width == 0:\n continue\n label[(slice(None),) * i + (slice(None, width),)] = 0\n label[(slice(None),) * i + (slice(-width, None),)] = 0\n return label\n\n\ndef _get_threshold(image, threshold_abs, threshold_rel):\n \"\"\"Return the threshold value according to an absolute and a relative\n value.\n\n \"\"\"\n threshold = threshold_abs if threshold_abs is not None else image.min()\n\n if threshold_rel is not None:\n threshold = max(threshold, threshold_rel * image.max())\n\n return threshold\n\n\ndef _get_excluded_border_width(image, min_distance, exclude_border):\n \"\"\"Return border_width values relative to a min_distance if requested.\n\n \"\"\"\n\n if isinstance(exclude_border, bool):\n border_width = (min_distance if exclude_border else 0,) * image.ndim\n elif isinstance(exclude_border, int):\n if exclude_border < 0:\n raise ValueError(\"`exclude_border` cannot be a negative value\")\n border_width = (exclude_border,) * image.ndim\n elif isinstance(exclude_border, tuple):\n if len(exclude_border) != image.ndim:\n raise ValueError(\n \"`exclude_border` should have the same length as the \"\n \"dimensionality of the image.\")\n for exclude in exclude_border:\n if not isinstance(exclude, int):\n raise ValueError(\n \"`exclude_border`, when expressed as a tuple, must only \"\n \"contain ints.\"\n )\n if exclude < 0:\n raise ValueError(\n \"`exclude_border` can not be a negative value\")\n border_width = exclude_border\n else:\n raise TypeError(\n \"`exclude_border` must be bool, int, or tuple with the same \"\n \"length as the dimensionality of the image.\")\n\n return border_width\n\n\ndef peak_local_max(image, min_distance=1, threshold_abs=None,\n threshold_rel=None, exclude_border=True,\n num_peaks=np.inf, footprint=None, labels=None,\n num_peaks_per_label=np.inf, p_norm=np.inf):\n \"\"\"Find peaks in an image as coordinate list.\n\n Peaks are the local maxima in a region of `2 * min_distance + 1`\n (i.e. 
peaks are separated by at least `min_distance`).\n\n If both `threshold_abs` and `threshold_rel` are provided, the maximum\n of the two is chosen as the minimum intensity threshold of peaks.\n\n .. versionchanged:: 0.18\n Prior to version 0.18, peaks of the same height within a radius of\n `min_distance` were all returned, but this could cause unexpected\n behaviour. From 0.18 onwards, an arbitrary peak within the region is\n returned. See issue gh-2592.\n\n Parameters\n ----------\n image : ndarray\n Input image.\n min_distance : int, optional\n The minimal allowed distance separating peaks. To find the\n maximum number of peaks, use `min_distance=1`.\n threshold_abs : float or None, optional\n Minimum intensity of peaks. By default, the absolute threshold is\n the minimum intensity of the image.\n threshold_rel : float or None, optional\n Minimum intensity of peaks, calculated as\n ``max(image) * threshold_rel``.\n exclude_border : int, tuple of ints, or bool, optional\n If positive integer, `exclude_border` excludes peaks from within\n `exclude_border`-pixels of the border of the image.\n If tuple of non-negative ints, the length of the tuple must match the\n input array's dimensionality. Each element of the tuple will exclude\n peaks from within `exclude_border`-pixels of the border of the image\n along that dimension.\n If True, takes the `min_distance` parameter as value.\n If zero or False, peaks are identified regardless of their distance\n from the border.\n num_peaks : int, optional\n Maximum number of peaks. When the number of peaks exceeds `num_peaks`,\n return `num_peaks` peaks based on highest peak intensity.\n footprint : ndarray of bools, optional\n If provided, `footprint == 1` represents the local region within which\n to search for peaks at every point in `image`.\n labels : ndarray of ints, optional\n If provided, each unique region `labels == value` represents a unique\n region to search for peaks. Zero is reserved for background.\n num_peaks_per_label : int, optional\n Maximum number of peaks for each label.\n p_norm : float\n Which Minkowski p-norm to use. Should be in the range [1, inf].\n A finite large p may cause a ValueError if overflow can occur.\n ``inf`` corresponds to the Chebyshev distance and 2 to the\n Euclidean distance.\n\n Returns\n -------\n output : ndarray\n The coordinates of the peaks.\n\n Notes\n -----\n The peak local maximum function returns the coordinates of local peaks\n (maxima) in an image. Internally, a maximum filter is used for finding\n local maxima. This operation dilates the original image. After comparison\n of the dilated and original images, this function returns the coordinates\n of the peaks where the dilated image equals the original image.\n\n See also\n --------\n skimage.feature.corner_peaks\n\n Examples\n --------\n >>> img1 = np.zeros((7, 7))\n >>> img1[3, 4] = 1\n >>> img1[3, 2] = 1.5\n >>> img1\n array([[0. , 0. , 0. , 0. , 0. , 0. , 0. ],\n [0. , 0. , 0. , 0. , 0. , 0. , 0. ],\n [0. , 0. , 0. , 0. , 0. , 0. , 0. ],\n [0. , 0. , 1.5, 0. , 1. , 0. , 0. ],\n [0. , 0. , 0. , 0. , 0. , 0. , 0. ],\n [0. , 0. , 0. , 0. , 0. , 0. , 0. ],\n [0. , 0. , 0. , 0. , 0. , 0. , 0. 
]])\n\n >>> peak_local_max(img1, min_distance=1)\n array([[3, 2],\n [3, 4]])\n\n >>> peak_local_max(img1, min_distance=2)\n array([[3, 2]])\n\n >>> img2 = np.zeros((20, 20, 20))\n >>> img2[10, 10, 10] = 1\n >>> img2[15, 15, 15] = 1\n >>> peak_idx = peak_local_max(img2, exclude_border=0)\n >>> peak_idx\n array([[10, 10, 10],\n [15, 15, 15]])\n\n >>> peak_mask = np.zeros_like(img2, dtype=bool)\n >>> peak_mask[tuple(peak_idx.T)] = True\n >>> np.argwhere(peak_mask)\n array([[10, 10, 10],\n [15, 15, 15]])\n\n \"\"\"\n if (footprint is None or footprint.size == 1) and min_distance < 1:\n warn(\"When min_distance < 1, peak_local_max acts as finding \"\n \"image > max(threshold_abs, threshold_rel * max(image)).\",\n RuntimeWarning, stacklevel=2)\n\n border_width = _get_excluded_border_width(image, min_distance,\n exclude_border)\n\n threshold = _get_threshold(image, threshold_abs, threshold_rel)\n\n if footprint is None:\n size = 2 * min_distance + 1\n footprint = np.ones((size, ) * image.ndim, dtype=bool)\n else:\n footprint = np.asarray(footprint)\n\n if labels is None:\n # Non maximum filter\n mask = _get_peak_mask(image, footprint, threshold)\n\n mask = _exclude_border(mask, border_width)\n\n # Select highest intensities (num_peaks)\n coordinates = _get_high_intensity_peaks(image, mask,\n num_peaks,\n min_distance, p_norm)\n\n else:\n _labels = _exclude_border(labels.astype(int, casting=\"safe\"),\n border_width)\n\n if np.issubdtype(image.dtype, np.floating):\n bg_val = np.finfo(image.dtype).min\n else:\n bg_val = np.iinfo(image.dtype).min\n\n # For each label, extract a smaller image enclosing the object of\n # interest, identify num_peaks_per_label peaks\n labels_peak_coord = []\n\n for label_idx, roi in enumerate(ndi.find_objects(_labels)):\n\n if roi is None:\n continue\n\n # Get roi mask\n label_mask = labels[roi] == label_idx + 1\n # Extract image roi\n img_object = image[roi].copy()\n # Ensure masked values don't affect roi's local peaks\n img_object[np.logical_not(label_mask)] = bg_val\n\n mask = _get_peak_mask(img_object, footprint, threshold, label_mask)\n\n coordinates = _get_high_intensity_peaks(img_object, mask,\n num_peaks_per_label,\n min_distance,\n p_norm)\n\n # transform coordinates in global image indices space\n for idx, s in enumerate(roi):\n coordinates[:, idx] += s.start\n\n labels_peak_coord.append(coordinates)\n\n if labels_peak_coord:\n coordinates = np.vstack(labels_peak_coord)\n else:\n coordinates = np.empty((0, 2), dtype=int)\n\n if len(coordinates) > num_peaks:\n out = np.zeros_like(image, dtype=bool)\n out[tuple(coordinates.T)] = True\n coordinates = _get_high_intensity_peaks(image, out,\n num_peaks,\n min_distance,\n p_norm)\n\n return coordinates\n\n\ndef _prominent_peaks(image, min_xdistance=1, min_ydistance=1,\n threshold=None, num_peaks=np.inf):\n \"\"\"Return peaks with non-maximum suppression.\n\n Identifies most prominent features separated by certain distances.\n Non-maximum suppression with different sizes is applied separately\n in the first and second dimension of the image to identify peaks.\n\n Parameters\n ----------\n image : (M, N) ndarray\n Input image.\n min_xdistance : int\n Minimum distance separating features in the x dimension.\n min_ydistance : int\n Minimum distance separating features in the y dimension.\n threshold : float\n Minimum intensity of peaks. Default is `0.5 * max(image)`.\n num_peaks : int\n Maximum number of peaks. 
When the number of peaks exceeds `num_peaks`,\n return `num_peaks` coordinates based on peak intensity.\n\n Returns\n -------\n intensity, xcoords, ycoords : tuple of array\n Peak intensity values, x and y indices.\n \"\"\"\n\n img = image.copy()\n rows, cols = img.shape\n\n if threshold is None:\n threshold = 0.5 * np.max(img)\n\n ycoords_size = 2 * min_ydistance + 1\n xcoords_size = 2 * min_xdistance + 1\n img_max = ndi.maximum_filter1d(img, size=ycoords_size, axis=0,\n mode='constant', cval=0)\n img_max = ndi.maximum_filter1d(img_max, size=xcoords_size, axis=1,\n mode='constant', cval=0)\n mask = (img == img_max)\n img *= mask\n img_t = img > threshold\n\n label_img = measure.label(img_t)\n props = measure.regionprops(label_img, img_max)\n\n # Sort the list of peaks by intensity, not left-right, so larger peaks\n # in Hough space cannot be arbitrarily suppressed by smaller neighbors\n props = sorted(props, key=lambda x: x.intensity_max)[::-1]\n coords = np.array([np.round(p.centroid) for p in props], dtype=int)\n\n img_peaks = []\n ycoords_peaks = []\n xcoords_peaks = []\n\n # relative coordinate grid for local neighborhood suppression\n ycoords_ext, xcoords_ext = np.mgrid[-min_ydistance:min_ydistance + 1,\n -min_xdistance:min_xdistance + 1]\n\n for ycoords_idx, xcoords_idx in coords:\n accum = img_max[ycoords_idx, xcoords_idx]\n if accum > threshold:\n # absolute coordinate grid for local neighborhood suppression\n ycoords_nh = ycoords_idx + ycoords_ext\n xcoords_nh = xcoords_idx + xcoords_ext\n\n # no reflection for distance neighborhood\n ycoords_in = np.logical_and(ycoords_nh > 0, ycoords_nh < rows)\n ycoords_nh = ycoords_nh[ycoords_in]\n xcoords_nh = xcoords_nh[ycoords_in]\n\n # reflect xcoords and assume xcoords are continuous,\n # e.g. for angles:\n # (..., 88, 89, -90, -89, ..., 89, -90, -89, ...)\n xcoords_low = xcoords_nh < 0\n ycoords_nh[xcoords_low] = rows - ycoords_nh[xcoords_low]\n xcoords_nh[xcoords_low] += cols\n xcoords_high = xcoords_nh >= cols\n ycoords_nh[xcoords_high] = rows - ycoords_nh[xcoords_high]\n xcoords_nh[xcoords_high] -= cols\n\n # suppress neighborhood\n img_max[ycoords_nh, xcoords_nh] = 0\n\n # add current feature to peaks\n img_peaks.append(accum)\n ycoords_peaks.append(ycoords_idx)\n xcoords_peaks.append(xcoords_idx)\n\n img_peaks = np.array(img_peaks)\n ycoords_peaks = np.array(ycoords_peaks)\n xcoords_peaks = np.array(xcoords_peaks)\n\n if num_peaks < len(img_peaks):\n idx_maxsort = np.argsort(img_peaks)[::-1][:num_peaks]\n img_peaks = img_peaks[idx_maxsort]\n ycoords_peaks = ycoords_peaks[idx_maxsort]\n xcoords_peaks = xcoords_peaks[idx_maxsort]\n\n return img_peaks, xcoords_peaks, ycoords_peaks\n",
"path": "skimage/feature/peak.py"
}
] | diff --git a/skimage/feature/peak.py b/skimage/feature/peak.py
index 2bba6537f83..4097d8245b1 100644
--- a/skimage/feature/peak.py
+++ b/skimage/feature/peak.py
@@ -40,7 +40,7 @@ def _get_peak_mask(image, footprint, threshold, mask=None):
return image > threshold
image_max = ndi.maximum_filter(image, footprint=footprint,
- mode='constant')
+ mode='nearest')
out = image == image_max
diff --git a/skimage/feature/tests/test_peak.py b/skimage/feature/tests/test_peak.py
index 113a736d5d3..6c557decf16 100644
--- a/skimage/feature/tests/test_peak.py
+++ b/skimage/feature/tests/test_peak.py
@@ -400,6 +400,19 @@ def test_threshold_rel_default(self):
assert len(peak.peak_local_max(image,
min_distance=0)) == image.size - 1
+ def test_peak_at_border(self):
+ image = np.full((10, 10), -2)
+ image[2, 4] = -1
+ image[3, 0] = -1
+
+ peaks = peak.peak_local_max(image, min_distance=3)
+ assert peaks.size == 0
+
+ peaks = peak.peak_local_max(image, min_distance=3, exclude_border=0)
+ assert len(peaks) == 2
+ assert [2, 4] in peaks
+ assert [3, 0] in peaks
+
@pytest.mark.parametrize(
["indices"],
|
PyGithub__PyGithub-1891 | allow PyJWT 2+
Other libraries are moving to PyJWT 2+ as a requirement; is it possible to update pygithub as well? Currently we can't use, for example, pygithub together with django-social-core.
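For context, the main breaking changes downstream code has to absorb when moving to PyJWT 2.x are that `jwt.encode` now returns `str` instead of `bytes`, and that `jwt.decode` requires an explicit `algorithms` list. A minimal version-agnostic sketch (the payload and secret are placeholders):
```python
import jwt

token = jwt.encode({"iss": "demo"}, "secret", algorithm="HS256")

# PyJWT 1.x returned bytes here, so callers often did token.decode("utf-8");
# under 2.x that call fails because token is already a str.
if isinstance(token, bytes):  # only true on PyJWT 1.x
    token = token.decode("utf-8")

# PyJWT 2.x makes the algorithms argument mandatory.
print(jwt.decode(token, "secret", algorithms=["HS256"]))
```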
| [
{
"content": "#!/usr/bin/env python\n\n############################ Copyrights and license ############################\n# #\n# Copyright 2012 Vincent Jacques <[email protected]> #\n# Copyright 2012 Zearin <[email protected]> #\n# Copyright 2013 Vincent Jacques <[email protected]> #\n# Copyright 2014 Tomas Radej <[email protected]> #\n# Copyright 2014 Vincent Jacques <[email protected]> #\n# Copyright 2015 Jimmy Zelinskie <[email protected]> #\n# Copyright 2016 Felix Yan <[email protected]> #\n# Copyright 2016 Jakub Wilk <[email protected]> #\n# Copyright 2016 Jannis Gebauer <[email protected]> #\n# Copyright 2016 Peter Buckley <[email protected]> #\n# Copyright 2017 Hugo <[email protected]> #\n# Copyright 2017 Jannis Gebauer <[email protected]> #\n# Copyright 2017 Jannis Gebauer <[email protected]> #\n# Copyright 2017 Nhomar Hernandez <[email protected]> #\n# Copyright 2017 Paul Ortman <[email protected]> #\n# Copyright 2018 Jason White <[email protected]> #\n# Copyright 2018 Mike Miller <[email protected]> #\n# Copyright 2018 Wan Liuyang <[email protected]> #\n# Copyright 2018 sfdye <[email protected]> #\n# #\n# This file is part of PyGithub. #\n# http://pygithub.readthedocs.io/ #\n# #\n# PyGithub is free software: you can redistribute it and/or modify it under #\n# the terms of the GNU Lesser General Public License as published by the Free #\n# Software Foundation, either version 3 of the License, or (at your option) #\n# any later version. #\n# #\n# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #\n# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #\n# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #\n# details. #\n# #\n# You should have received a copy of the GNU Lesser General Public License #\n# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. 
#\n# #\n################################################################################\n\nimport textwrap\n\nimport setuptools\n\nversion = \"1.54.1\"\n\n\nif __name__ == \"__main__\":\n setuptools.setup(\n name=\"PyGithub\",\n version=version,\n description=\"Use the full Github API v3\",\n author=\"Vincent Jacques\",\n author_email=\"[email protected]\",\n url=\"https://github.com/pygithub/pygithub\",\n project_urls={\n \"Documentation\": \"http://pygithub.readthedocs.io/en/latest/\",\n \"Source\": \"https://github.com/pygithub/pygithub\",\n \"Tracker\": \"https://github.com/pygithub/pygithub/issues\",\n },\n long_description=textwrap.dedent(\n \"\"\"\\\n (Very short) Tutorial\n =====================\n\n First create a Github instance::\n\n from github import Github\n\n # using username and password\n g = Github(\"user\", \"password\")\n\n # or using an access token\n g = Github(\"access_token\")\n\n Then play with your Github objects::\n\n for repo in g.get_user().get_repos():\n print(repo.name)\n repo.edit(has_wiki=False)\n\n Reference documentation\n =======================\n\n See http://pygithub.readthedocs.io/en/latest/\"\"\"\n ),\n packages=[\"github\"],\n package_data={\"github\": [\"py.typed\", \"*.pyi\"]},\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Web Environment\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: GNU Library or Lesser General Public License (LGPL)\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Topic :: Software Development\",\n ],\n python_requires=\">=3.6\",\n install_requires=[\n \"deprecated\",\n \"pyjwt<2.0\",\n \"pynacl>=1.4.0\",\n \"requests>=2.14.0\",\n ],\n extras_require={\"integrations\": [\"cryptography\"]},\n tests_require=[\"cryptography\", \"httpretty>=1.0.3\"],\n )\n",
"path": "setup.py"
}
] | [
{
"content": "#!/usr/bin/env python\n\n############################ Copyrights and license ############################\n# #\n# Copyright 2012 Vincent Jacques <[email protected]> #\n# Copyright 2012 Zearin <[email protected]> #\n# Copyright 2013 Vincent Jacques <[email protected]> #\n# Copyright 2014 Tomas Radej <[email protected]> #\n# Copyright 2014 Vincent Jacques <[email protected]> #\n# Copyright 2015 Jimmy Zelinskie <[email protected]> #\n# Copyright 2016 Felix Yan <[email protected]> #\n# Copyright 2016 Jakub Wilk <[email protected]> #\n# Copyright 2016 Jannis Gebauer <[email protected]> #\n# Copyright 2016 Peter Buckley <[email protected]> #\n# Copyright 2017 Hugo <[email protected]> #\n# Copyright 2017 Jannis Gebauer <[email protected]> #\n# Copyright 2017 Jannis Gebauer <[email protected]> #\n# Copyright 2017 Nhomar Hernandez <[email protected]> #\n# Copyright 2017 Paul Ortman <[email protected]> #\n# Copyright 2018 Jason White <[email protected]> #\n# Copyright 2018 Mike Miller <[email protected]> #\n# Copyright 2018 Wan Liuyang <[email protected]> #\n# Copyright 2018 sfdye <[email protected]> #\n# #\n# This file is part of PyGithub. #\n# http://pygithub.readthedocs.io/ #\n# #\n# PyGithub is free software: you can redistribute it and/or modify it under #\n# the terms of the GNU Lesser General Public License as published by the Free #\n# Software Foundation, either version 3 of the License, or (at your option) #\n# any later version. #\n# #\n# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #\n# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #\n# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #\n# details. #\n# #\n# You should have received a copy of the GNU Lesser General Public License #\n# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. 
#\n# #\n################################################################################\n\nimport textwrap\n\nimport setuptools\n\nversion = \"1.54.1\"\n\n\nif __name__ == \"__main__\":\n setuptools.setup(\n name=\"PyGithub\",\n version=version,\n description=\"Use the full Github API v3\",\n author=\"Vincent Jacques\",\n author_email=\"[email protected]\",\n url=\"https://github.com/pygithub/pygithub\",\n project_urls={\n \"Documentation\": \"http://pygithub.readthedocs.io/en/latest/\",\n \"Source\": \"https://github.com/pygithub/pygithub\",\n \"Tracker\": \"https://github.com/pygithub/pygithub/issues\",\n },\n long_description=textwrap.dedent(\n \"\"\"\\\n (Very short) Tutorial\n =====================\n\n First create a Github instance::\n\n from github import Github\n\n # using username and password\n g = Github(\"user\", \"password\")\n\n # or using an access token\n g = Github(\"access_token\")\n\n Then play with your Github objects::\n\n for repo in g.get_user().get_repos():\n print(repo.name)\n repo.edit(has_wiki=False)\n\n Reference documentation\n =======================\n\n See http://pygithub.readthedocs.io/en/latest/\"\"\"\n ),\n packages=[\"github\"],\n package_data={\"github\": [\"py.typed\", \"*.pyi\"]},\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Web Environment\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: GNU Library or Lesser General Public License (LGPL)\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Topic :: Software Development\",\n ],\n python_requires=\">=3.6\",\n install_requires=[\n \"deprecated\",\n \"pyjwt>=2.0\",\n \"pynacl>=1.4.0\",\n \"requests>=2.14.0\",\n ],\n extras_require={\"integrations\": [\"cryptography\"]},\n tests_require=[\"cryptography\", \"httpretty>=1.0.3\"],\n )\n",
"path": "setup.py"
}
] | diff --git a/requirements.txt b/requirements.txt
index d04aaf9c1c..0892b7ff0d 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,6 +1,6 @@
pynacl>=1.4.0
requests>=2.14.0
-pyjwt<2.0
+pyjwt>=2.0
sphinx<3
sphinx-rtd-theme<0.6
Deprecated
diff --git a/setup.py b/setup.py
index d6fa62dbd0..7157bc8773 100755
--- a/setup.py
+++ b/setup.py
@@ -105,7 +105,7 @@
python_requires=">=3.6",
install_requires=[
"deprecated",
- "pyjwt<2.0",
+ "pyjwt>=2.0",
"pynacl>=1.4.0",
"requests>=2.14.0",
],
diff --git a/tests/GitRelease.py b/tests/GitRelease.py
index 6e4cc5a8af..7828702220 100644
--- a/tests/GitRelease.py
+++ b/tests/GitRelease.py
@@ -206,7 +206,9 @@ def testUploadAsset(self):
def testUploadAssetWithName(self):
self.setUpNewRelease()
release = self.new_release
- r = release.upload_asset(self.artifact_path, name="foobar.zip")
+ r = release.upload_asset(
+ self.artifact_path, name="foobar.zip", content_type="application/zip"
+ )
self.assertEqual(r.name, "foobar.zip")
self.tearDownNewRelease()
|
deis__deis-834 | Support both Dockerfiles and Procfiles
Currently, deploying any codebase that contains both a Procfile and a Dockerfile will receive an error:
```
name 'slug_path' is not defined
```
This is because the variable `slug_path` is not defined for the relevant context in Python (see `builder/templates/builder` around line 60).
Ideally, a Dockerfile-based app with a Procfile should be treated exactly the same as a buildpack-based app with a Procfile. That is, the commands in the Procfile should be considered authoritative and supersede the CMD in the Dockerfile.
So just like with buildpacks, the same build output (in this case a container image, not a slug) can still be run with the command from the Procfile, right? The only issue I can think of might be that the Procfile format (with which I'm pretty unfamiliar) may allow sequences of commands (or worse, I/O redirection) that won't work as a custom run argument to the docker executable.
But I've had reasonable success applying arbitrary shell commands to random containers, even if I have to wrap them in Bash like:
```
CMD ["bash", "-c", "bundle exec rake release && exec bundle exec rackup -p $PORT"]
```
Is this feature possible?
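To make the idea concrete, here is a minimal sketch of turning Procfile entries into per-process-type commands that could stand in for the Dockerfile's CMD (the regex and helper name are hypothetical, not deis code):
```python
import re

PROCTYPE = re.compile(r'^([A-Za-z0-9_-]+):\s*(.+)$')


def parse_procfile(text):
    """Return a {process type: command} dict from Procfile text.

    Hypothetical helper; ignores blank lines and anything that does not
    match the simple '<type>: <command>' form.
    """
    procs = {}
    for line in text.splitlines():
        match = PROCTYPE.match(line.strip())
        if match:
            procs[match.group(1)] = match.group(2)
    return procs


procfile = "web: bundle exec rackup -p $PORT\nworker: bundle exec rake jobs:work\n"
print(parse_procfile(procfile))
# {'web': 'bundle exec rackup -p $PORT', 'worker': 'bundle exec rake jobs:work'}
```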
| [
{
"content": "# -*- coding: utf-8 -*-\n\n\"\"\"\nData models for the Deis API.\n\"\"\"\n\nfrom __future__ import unicode_literals\nimport etcd\nimport importlib\nimport logging\nimport os\nimport subprocess\n\nfrom celery.canvas import group\nfrom django.conf import settings\nfrom django.contrib.auth.models import User\nfrom django.db import models\nfrom django.db.models.signals import post_delete\nfrom django.db.models.signals import post_save\nfrom django.utils.encoding import python_2_unicode_compatible\nfrom django_fsm import FSMField, transition\nfrom django_fsm.signals import post_transition\nfrom json_field.fields import JSONField\n\nfrom api import fields, tasks\nfrom registry import publish_release\nfrom utils import dict_diff, fingerprint\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef log_event(app, msg, level=logging.INFO):\n msg = \"{}: {}\".format(app.id, msg)\n logger.log(level, msg)\n\n\nclass AuditedModel(models.Model):\n \"\"\"Add created and updated fields to a model.\"\"\"\n\n created = models.DateTimeField(auto_now_add=True)\n updated = models.DateTimeField(auto_now=True)\n\n class Meta:\n \"\"\"Mark :class:`AuditedModel` as abstract.\"\"\"\n abstract = True\n\n\nclass UuidAuditedModel(AuditedModel):\n \"\"\"Add a UUID primary key to an :class:`AuditedModel`.\"\"\"\n\n uuid = fields.UuidField('UUID', primary_key=True)\n\n class Meta:\n \"\"\"Mark :class:`UuidAuditedModel` as abstract.\"\"\"\n abstract = True\n\n\n@python_2_unicode_compatible\nclass Cluster(UuidAuditedModel):\n \"\"\"\n Cluster used to run jobs\n \"\"\"\n\n CLUSTER_TYPES = (('mock', 'Mock Cluster'),\n ('coreos', 'CoreOS Cluster'),\n ('faulty', 'Faulty Cluster'))\n\n owner = models.ForeignKey(settings.AUTH_USER_MODEL)\n id = models.CharField(max_length=128, unique=True)\n type = models.CharField(max_length=16, choices=CLUSTER_TYPES, default='coreos')\n\n domain = models.CharField(max_length=128)\n hosts = models.CharField(max_length=256)\n auth = models.TextField()\n options = JSONField(default='{}', blank=True)\n\n def __str__(self):\n return self.id\n\n def _get_scheduler(self, *args, **kwargs):\n module_name = 'scheduler.' 
+ self.type\n mod = importlib.import_module(module_name)\n return mod.SchedulerClient(self.id, self.hosts, self.auth,\n self.domain, self.options)\n\n _scheduler = property(_get_scheduler)\n\n def create(self):\n \"\"\"\n Initialize a cluster's router and log aggregator\n \"\"\"\n return tasks.create_cluster.delay(self).get()\n\n def destroy(self):\n \"\"\"\n Destroy a cluster's router and log aggregator\n \"\"\"\n return tasks.destroy_cluster.delay(self).get()\n\n\n@python_2_unicode_compatible\nclass App(UuidAuditedModel):\n \"\"\"\n Application used to service requests on behalf of end-users\n \"\"\"\n\n owner = models.ForeignKey(settings.AUTH_USER_MODEL)\n id = models.SlugField(max_length=64, unique=True)\n cluster = models.ForeignKey('Cluster')\n structure = JSONField(default='{}', blank=True)\n\n class Meta:\n permissions = (('use_app', 'Can use app'),)\n\n def __str__(self):\n return self.id\n\n def create(self, *args, **kwargs):\n config = Config.objects.create(owner=self.owner, app=self, values={})\n build = Build.objects.create(owner=self.owner, app=self, image=settings.DEFAULT_BUILD)\n Release.objects.create(version=1, owner=self.owner, app=self, config=config, build=build)\n\n def destroy(self, *args, **kwargs):\n for c in self.container_set.all():\n c.destroy()\n\n def deploy(self, release):\n tasks.deploy_release.delay(self, release).get()\n if self.structure == {}:\n # scale the web process by 1 initially\n self.structure = {'web': 1}\n self.save()\n self.scale()\n\n def scale(self, **kwargs):\n \"\"\"Scale containers up or down to match requested.\"\"\"\n requested_containers = self.structure.copy()\n release = self.release_set.latest()\n # increment new container nums off the most recent container\n all_containers = self.container_set.all().order_by('-created')\n container_num = 1 if not all_containers else all_containers[0].num + 1\n msg = 'Containers scaled ' + ' '.join(\n \"{}={}\".format(k, v) for k, v in requested_containers.items())\n # iterate and scale by container type (web, worker, etc)\n changed = False\n to_add, to_remove = [], []\n for container_type in requested_containers.keys():\n containers = list(self.container_set.filter(type=container_type).order_by('created'))\n requested = requested_containers.pop(container_type)\n diff = requested - len(containers)\n if diff == 0:\n continue\n changed = True\n while diff < 0:\n c = containers.pop()\n to_remove.append(c)\n diff += 1\n while diff > 0:\n c = Container.objects.create(owner=self.owner,\n app=self,\n release=release,\n type=container_type,\n num=container_num)\n to_add.append(c)\n container_num += 1\n diff -= 1\n if changed:\n subtasks = []\n if to_add:\n subtasks.append(tasks.start_containers.s(to_add))\n if to_remove:\n subtasks.append(tasks.stop_containers.s(to_remove))\n group(*subtasks).apply_async().join()\n log_event(self, msg)\n return changed\n\n def logs(self):\n \"\"\"Return aggregated log data for this application.\"\"\"\n path = os.path.join(settings.DEIS_LOG_DIR, self.id + '.log')\n if not os.path.exists(path):\n raise EnvironmentError('Could not locate logs')\n data = subprocess.check_output(['tail', '-n', str(settings.LOG_LINES), path])\n return data\n\n def run(self, command):\n \"\"\"Run a one-off command in an ephemeral app container.\"\"\"\n # TODO: add support for interactive shell\n log_event(self, \"deis run '{}'\".format(command))\n c_num = max([c.num for c in self.container_set.filter(type='admin')] or [0]) + 1\n c = Container.objects.create(owner=self.owner,\n app=self,\n 
release=self.release_set.latest(),\n type='admin',\n num=c_num)\n rc, output = tasks.run_command.delay(c, command).get()\n return rc, output\n\n\n@python_2_unicode_compatible\nclass Container(UuidAuditedModel):\n \"\"\"\n Docker container used to securely host an application process.\n \"\"\"\n INITIALIZED = 'initialized'\n CREATED = 'created'\n UP = 'up'\n DOWN = 'down'\n DESTROYED = 'destroyed'\n STATE_CHOICES = (\n (INITIALIZED, 'initialized'),\n (CREATED, 'created'),\n (UP, 'up'),\n (DOWN, 'down'),\n (DESTROYED, 'destroyed')\n )\n\n owner = models.ForeignKey(settings.AUTH_USER_MODEL)\n app = models.ForeignKey('App')\n release = models.ForeignKey('Release')\n type = models.CharField(max_length=128, blank=True)\n num = models.PositiveIntegerField()\n state = FSMField(default=INITIALIZED, choices=STATE_CHOICES, protected=True)\n\n def short_name(self):\n if self.type:\n return \"{}.{}.{}\".format(self.release.app.id, self.type, self.num)\n return \"{}.{}\".format(self.release.app.id, self.num)\n short_name.short_description = 'Name'\n\n def __str__(self):\n return self.short_name()\n\n class Meta:\n get_latest_by = '-created'\n ordering = ['created']\n\n def _get_job_id(self):\n app = self.app.id\n release = self.release\n version = \"v{}\".format(release.version)\n num = self.num\n c_type = self.type\n if not c_type:\n job_id = \"{app}_{version}.{num}\".format(**locals())\n else:\n job_id = \"{app}_{version}.{c_type}.{num}\".format(**locals())\n return job_id\n\n _job_id = property(_get_job_id)\n\n def _get_scheduler(self):\n return self.app.cluster._scheduler\n\n _scheduler = property(_get_scheduler)\n\n def _get_command(self):\n c_type = self.type\n if c_type:\n return 'start {c_type}'\n else:\n return ''\n\n _command = property(_get_command)\n\n @transition(field=state, source=INITIALIZED, target=CREATED)\n def create(self):\n image = self.release.image\n c_type = self.type\n self._scheduler.create(self._job_id, image, self._command.format(**locals()))\n\n @transition(field=state,\n source=[CREATED, UP, DOWN],\n target=UP, crashed=DOWN)\n def start(self):\n self._scheduler.start(self._job_id)\n\n @transition(field=state,\n source=[INITIALIZED, CREATED, UP, DOWN],\n target=UP,\n crashed=DOWN)\n def deploy(self, release):\n old_job_id = self._job_id\n # update release\n self.release = release\n self.save()\n # deploy new container\n new_job_id = self._job_id\n image = self.release.image\n c_type = self.type\n self._scheduler.create(new_job_id, image, self._command.format(**locals()))\n self._scheduler.start(new_job_id)\n # destroy old container\n self._scheduler.destroy(old_job_id)\n\n @transition(field=state, source=UP, target=DOWN)\n def stop(self):\n self._scheduler.stop(self._job_id)\n\n @transition(field=state,\n source=[INITIALIZED, CREATED, UP, DOWN],\n target=DESTROYED)\n def destroy(self):\n # TODO: add check for active connections before killing\n self._scheduler.destroy(self._job_id)\n\n @transition(field=state,\n source=[INITIALIZED, CREATED, DESTROYED],\n target=DESTROYED)\n def run(self, command):\n \"\"\"Run a one-off command\"\"\"\n rc, output = self._scheduler.run(self._job_id, self.release.image, command)\n return rc, output\n\n\n@python_2_unicode_compatible\nclass Push(UuidAuditedModel):\n \"\"\"\n Instance of a push used to trigger an application build\n \"\"\"\n owner = models.ForeignKey(settings.AUTH_USER_MODEL)\n app = models.ForeignKey('App')\n sha = models.CharField(max_length=40)\n\n fingerprint = models.CharField(max_length=255)\n receive_user = 
models.CharField(max_length=255)\n receive_repo = models.CharField(max_length=255)\n\n ssh_connection = models.CharField(max_length=255)\n ssh_original_command = models.CharField(max_length=255)\n\n class Meta:\n get_latest_by = 'created'\n ordering = ['-created']\n unique_together = (('app', 'uuid'),)\n\n def __str__(self):\n return \"{0}-{1}\".format(self.app.id, self.sha[:7])\n\n\n@python_2_unicode_compatible\nclass Build(UuidAuditedModel):\n \"\"\"\n Instance of a software build used by runtime nodes\n \"\"\"\n\n owner = models.ForeignKey(settings.AUTH_USER_MODEL)\n app = models.ForeignKey('App')\n image = models.CharField(max_length=256)\n\n class Meta:\n get_latest_by = 'created'\n ordering = ['-created']\n unique_together = (('app', 'uuid'),)\n\n def __str__(self):\n return \"{0}-{1}\".format(self.app.id, self.uuid[:7])\n\n\n@python_2_unicode_compatible\nclass Config(UuidAuditedModel):\n \"\"\"\n Set of configuration values applied as environment variables\n during runtime execution of the Application.\n \"\"\"\n\n owner = models.ForeignKey(settings.AUTH_USER_MODEL)\n app = models.ForeignKey('App')\n values = JSONField(default='{}', blank=True)\n\n class Meta:\n get_latest_by = 'created'\n ordering = ['-created']\n unique_together = (('app', 'uuid'),)\n\n def __str__(self):\n return \"{}-{}\".format(self.app.id, self.uuid[:7])\n\n\n@python_2_unicode_compatible\nclass Release(UuidAuditedModel):\n \"\"\"\n Software release deployed by the application platform\n\n Releases contain a :class:`Build` and a :class:`Config`.\n \"\"\"\n\n owner = models.ForeignKey(settings.AUTH_USER_MODEL)\n app = models.ForeignKey('App')\n version = models.PositiveIntegerField()\n summary = models.TextField(blank=True, null=True)\n\n config = models.ForeignKey('Config')\n build = models.ForeignKey('Build')\n # NOTE: image contains combined build + config, ready to run\n image = models.CharField(max_length=256)\n\n class Meta:\n get_latest_by = 'created'\n ordering = ['-created']\n unique_together = (('app', 'version'),)\n\n def __str__(self):\n return \"{0}-v{1}\".format(self.app.id, self.version)\n\n def new(self, user, config=None, build=None, summary=None):\n \"\"\"\n Create a new application release using the provided Build and Config\n on behalf of a user.\n\n Releases start at v1 and auto-increment.\n \"\"\"\n if not config:\n config = self.config\n if not build:\n build = self.build\n # prepare release tag\n new_version = self.version + 1\n tag = 'v{}'.format(new_version)\n image = build.image + ':{tag}'.format(**locals())\n # create new release and auto-increment version\n release = Release.objects.create(\n owner=user, app=self.app, config=config,\n build=build, version=new_version, image=image, summary=summary)\n # publish release to registry as new docker image\n repository_path = \"{}/{}\".format(user.username, self.app.id)\n publish_release(repository_path, config.values, tag)\n return release\n\n def previous(self):\n \"\"\"\n Return the previous Release to this one.\n\n :return: the previous :class:`Release`, or None\n \"\"\"\n releases = self.app.release_set\n if self.pk:\n releases = releases.exclude(pk=self.pk)\n try:\n # Get the Release previous to this one\n prev_release = releases.latest()\n except Release.DoesNotExist:\n prev_release = None\n return prev_release\n\n def save(self, *args, **kwargs):\n if not self.summary:\n self.summary = ''\n prev_release = self.previous()\n # compare this build to the previous build\n old_build = prev_release.build if prev_release else None\n # if the 
build changed, log it and who pushed it\n if self.build != old_build:\n self.summary += \"{} deployed {}\".format(self.build.owner, self.build.image)\n # compare this config to the previous config\n old_config = prev_release.config if prev_release else None\n # if the config data changed, log the dict diff\n if self.config != old_config:\n dict1 = self.config.values\n dict2 = old_config.values if old_config else {}\n diff = dict_diff(dict1, dict2)\n # try to be as succinct as possible\n added = ', '.join(k for k in diff.get('added', {}))\n added = 'added ' + added if added else ''\n changed = ', '.join(k for k in diff.get('changed', {}))\n changed = 'changed ' + changed if changed else ''\n deleted = ', '.join(k for k in diff.get('deleted', {}))\n deleted = 'deleted ' + deleted if deleted else ''\n changes = ', '.join(i for i in (added, changed, deleted) if i)\n if changes:\n if self.summary:\n self.summary += ' and '\n self.summary += \"{} {}\".format(self.config.owner, changes)\n if not self.summary:\n if self.version == 1:\n self.summary = \"{} created the initial release\".format(self.owner)\n else:\n self.summary = \"{} changed nothing\".format(self.owner)\n super(Release, self).save(*args, **kwargs)\n\n\n@python_2_unicode_compatible\nclass Key(UuidAuditedModel):\n \"\"\"An SSH public key.\"\"\"\n\n owner = models.ForeignKey(settings.AUTH_USER_MODEL)\n id = models.CharField(max_length=128)\n public = models.TextField(unique=True)\n\n class Meta:\n verbose_name = 'SSH Key'\n unique_together = (('owner', 'id'))\n\n def __str__(self):\n return \"{}...{}\".format(self.public[:18], self.public[-31:])\n\n\n# define update/delete callbacks for synchronizing\n# models with the configuration management backend\n\n\ndef _log_build_created(**kwargs):\n if kwargs.get('created'):\n build = kwargs['instance']\n log_event(build.app, \"Build {} created\".format(build))\n\n\ndef _log_release_created(**kwargs):\n if kwargs.get('created'):\n release = kwargs['instance']\n log_event(release.app, \"Release {} created\".format(release))\n\n\ndef _log_config_updated(**kwargs):\n config = kwargs['instance']\n log_event(config.app, \"Config {} updated\".format(config))\n\n\ndef _etcd_publish_key(**kwargs):\n key = kwargs['instance']\n _etcd_client.write('/deis/builder/users/{}/{}'.format(\n key.owner.username, fingerprint(key.public)), key.public)\n\n\ndef _etcd_purge_key(**kwargs):\n key = kwargs['instance']\n _etcd_client.delete('/deis/builder/users/{}/{}'.format(\n key.owner.username, fingerprint(key.public)))\n\n\ndef _etcd_purge_user(**kwargs):\n username = kwargs['instance'].username\n _etcd_client.delete('/deis/builder/users/{}'.format(username), dir=True, recursive=True)\n\n\n# Log significant app-related events\npost_save.connect(_log_build_created, sender=Build, dispatch_uid='api.models')\npost_save.connect(_log_release_created, sender=Release, dispatch_uid='api.models')\npost_save.connect(_log_config_updated, sender=Config, dispatch_uid='api.models')\n\n\n# save FSM transitions as they happen\ndef _save_transition(**kwargs):\n kwargs['instance'].save()\n\npost_transition.connect(_save_transition)\n\n# wire up etcd publishing if we can connect\ntry:\n _etcd_client = etcd.Client(host=settings.ETCD_HOST, port=int(settings.ETCD_PORT))\n _etcd_client.get('/deis')\nexcept etcd.EtcdException:\n logger.log(logging.WARNING, 'Cannot synchronize with etcd cluster')\n _etcd_client = None\n\nif _etcd_client:\n post_save.connect(_etcd_publish_key, sender=Key, dispatch_uid='api.models')\n 
post_delete.connect(_etcd_purge_key, sender=Key, dispatch_uid='api.models')\n post_delete.connect(_etcd_purge_user, sender=User, dispatch_uid='api.models')\n",
"path": "controller/api/models.py"
}
] | [
{
"content": "# -*- coding: utf-8 -*-\n\n\"\"\"\nData models for the Deis API.\n\"\"\"\n\nfrom __future__ import unicode_literals\nimport etcd\nimport importlib\nimport logging\nimport os\nimport subprocess\n\nfrom celery.canvas import group\nfrom django.conf import settings\nfrom django.contrib.auth.models import User\nfrom django.db import models\nfrom django.db.models.signals import post_delete\nfrom django.db.models.signals import post_save\nfrom django.utils.encoding import python_2_unicode_compatible\nfrom django_fsm import FSMField, transition\nfrom django_fsm.signals import post_transition\nfrom json_field.fields import JSONField\n\nfrom api import fields, tasks\nfrom registry import publish_release\nfrom utils import dict_diff, fingerprint\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef log_event(app, msg, level=logging.INFO):\n msg = \"{}: {}\".format(app.id, msg)\n logger.log(level, msg)\n\n\nclass AuditedModel(models.Model):\n \"\"\"Add created and updated fields to a model.\"\"\"\n\n created = models.DateTimeField(auto_now_add=True)\n updated = models.DateTimeField(auto_now=True)\n\n class Meta:\n \"\"\"Mark :class:`AuditedModel` as abstract.\"\"\"\n abstract = True\n\n\nclass UuidAuditedModel(AuditedModel):\n \"\"\"Add a UUID primary key to an :class:`AuditedModel`.\"\"\"\n\n uuid = fields.UuidField('UUID', primary_key=True)\n\n class Meta:\n \"\"\"Mark :class:`UuidAuditedModel` as abstract.\"\"\"\n abstract = True\n\n\n@python_2_unicode_compatible\nclass Cluster(UuidAuditedModel):\n \"\"\"\n Cluster used to run jobs\n \"\"\"\n\n CLUSTER_TYPES = (('mock', 'Mock Cluster'),\n ('coreos', 'CoreOS Cluster'),\n ('faulty', 'Faulty Cluster'))\n\n owner = models.ForeignKey(settings.AUTH_USER_MODEL)\n id = models.CharField(max_length=128, unique=True)\n type = models.CharField(max_length=16, choices=CLUSTER_TYPES, default='coreos')\n\n domain = models.CharField(max_length=128)\n hosts = models.CharField(max_length=256)\n auth = models.TextField()\n options = JSONField(default='{}', blank=True)\n\n def __str__(self):\n return self.id\n\n def _get_scheduler(self, *args, **kwargs):\n module_name = 'scheduler.' 
+ self.type\n mod = importlib.import_module(module_name)\n return mod.SchedulerClient(self.id, self.hosts, self.auth,\n self.domain, self.options)\n\n _scheduler = property(_get_scheduler)\n\n def create(self):\n \"\"\"\n Initialize a cluster's router and log aggregator\n \"\"\"\n return tasks.create_cluster.delay(self).get()\n\n def destroy(self):\n \"\"\"\n Destroy a cluster's router and log aggregator\n \"\"\"\n return tasks.destroy_cluster.delay(self).get()\n\n\n@python_2_unicode_compatible\nclass App(UuidAuditedModel):\n \"\"\"\n Application used to service requests on behalf of end-users\n \"\"\"\n\n owner = models.ForeignKey(settings.AUTH_USER_MODEL)\n id = models.SlugField(max_length=64, unique=True)\n cluster = models.ForeignKey('Cluster')\n structure = JSONField(default='{}', blank=True)\n\n class Meta:\n permissions = (('use_app', 'Can use app'),)\n\n def __str__(self):\n return self.id\n\n def create(self, *args, **kwargs):\n config = Config.objects.create(owner=self.owner, app=self, values={})\n build = Build.objects.create(owner=self.owner, app=self, image=settings.DEFAULT_BUILD)\n Release.objects.create(version=1, owner=self.owner, app=self, config=config, build=build)\n\n def destroy(self, *args, **kwargs):\n for c in self.container_set.all():\n c.destroy()\n\n def deploy(self, release):\n tasks.deploy_release.delay(self, release).get()\n if self.structure == {}:\n # scale the web process by 1 initially\n self.structure = {'web': 1}\n self.save()\n self.scale()\n\n def scale(self, **kwargs):\n \"\"\"Scale containers up or down to match requested.\"\"\"\n requested_containers = self.structure.copy()\n release = self.release_set.latest()\n # increment new container nums off the most recent container\n all_containers = self.container_set.all().order_by('-created')\n container_num = 1 if not all_containers else all_containers[0].num + 1\n msg = 'Containers scaled ' + ' '.join(\n \"{}={}\".format(k, v) for k, v in requested_containers.items())\n # iterate and scale by container type (web, worker, etc)\n changed = False\n to_add, to_remove = [], []\n for container_type in requested_containers.keys():\n containers = list(self.container_set.filter(type=container_type).order_by('created'))\n requested = requested_containers.pop(container_type)\n diff = requested - len(containers)\n if diff == 0:\n continue\n changed = True\n while diff < 0:\n c = containers.pop()\n to_remove.append(c)\n diff += 1\n while diff > 0:\n c = Container.objects.create(owner=self.owner,\n app=self,\n release=release,\n type=container_type,\n num=container_num)\n to_add.append(c)\n container_num += 1\n diff -= 1\n if changed:\n subtasks = []\n if to_add:\n subtasks.append(tasks.start_containers.s(to_add))\n if to_remove:\n subtasks.append(tasks.stop_containers.s(to_remove))\n group(*subtasks).apply_async().join()\n log_event(self, msg)\n return changed\n\n def logs(self):\n \"\"\"Return aggregated log data for this application.\"\"\"\n path = os.path.join(settings.DEIS_LOG_DIR, self.id + '.log')\n if not os.path.exists(path):\n raise EnvironmentError('Could not locate logs')\n data = subprocess.check_output(['tail', '-n', str(settings.LOG_LINES), path])\n return data\n\n def run(self, command):\n \"\"\"Run a one-off command in an ephemeral app container.\"\"\"\n # TODO: add support for interactive shell\n log_event(self, \"deis run '{}'\".format(command))\n c_num = max([c.num for c in self.container_set.filter(type='admin')] or [0]) + 1\n c = Container.objects.create(owner=self.owner,\n app=self,\n 
release=self.release_set.latest(),\n type='admin',\n num=c_num)\n rc, output = tasks.run_command.delay(c, command).get()\n return rc, output\n\n\n@python_2_unicode_compatible\nclass Container(UuidAuditedModel):\n \"\"\"\n Docker container used to securely host an application process.\n \"\"\"\n INITIALIZED = 'initialized'\n CREATED = 'created'\n UP = 'up'\n DOWN = 'down'\n DESTROYED = 'destroyed'\n STATE_CHOICES = (\n (INITIALIZED, 'initialized'),\n (CREATED, 'created'),\n (UP, 'up'),\n (DOWN, 'down'),\n (DESTROYED, 'destroyed')\n )\n\n owner = models.ForeignKey(settings.AUTH_USER_MODEL)\n app = models.ForeignKey('App')\n release = models.ForeignKey('Release')\n type = models.CharField(max_length=128, blank=True)\n num = models.PositiveIntegerField()\n state = FSMField(default=INITIALIZED, choices=STATE_CHOICES, protected=True)\n\n def short_name(self):\n if self.type:\n return \"{}.{}.{}\".format(self.release.app.id, self.type, self.num)\n return \"{}.{}\".format(self.release.app.id, self.num)\n short_name.short_description = 'Name'\n\n def __str__(self):\n return self.short_name()\n\n class Meta:\n get_latest_by = '-created'\n ordering = ['created']\n\n def _get_job_id(self):\n app = self.app.id\n release = self.release\n version = \"v{}\".format(release.version)\n num = self.num\n c_type = self.type\n if not c_type:\n job_id = \"{app}_{version}.{num}\".format(**locals())\n else:\n job_id = \"{app}_{version}.{c_type}.{num}\".format(**locals())\n return job_id\n\n _job_id = property(_get_job_id)\n\n def _get_scheduler(self):\n return self.app.cluster._scheduler\n\n _scheduler = property(_get_scheduler)\n\n def _get_command(self):\n c_type = self.type\n if c_type:\n return \"cat Procfile | grep ^{c_type} | cut -f 1 -d ' ' --complement | sh -\"\n else:\n return ''\n\n _command = property(_get_command)\n\n @transition(field=state, source=INITIALIZED, target=CREATED)\n def create(self):\n image = self.release.image\n c_type = self.type\n self._scheduler.create(self._job_id, image, self._command.format(**locals()))\n\n @transition(field=state,\n source=[CREATED, UP, DOWN],\n target=UP, crashed=DOWN)\n def start(self):\n self._scheduler.start(self._job_id)\n\n @transition(field=state,\n source=[INITIALIZED, CREATED, UP, DOWN],\n target=UP,\n crashed=DOWN)\n def deploy(self, release):\n old_job_id = self._job_id\n # update release\n self.release = release\n self.save()\n # deploy new container\n new_job_id = self._job_id\n image = self.release.image\n c_type = self.type\n self._scheduler.create(new_job_id, image, self._command.format(**locals()))\n self._scheduler.start(new_job_id)\n # destroy old container\n self._scheduler.destroy(old_job_id)\n\n @transition(field=state, source=UP, target=DOWN)\n def stop(self):\n self._scheduler.stop(self._job_id)\n\n @transition(field=state,\n source=[INITIALIZED, CREATED, UP, DOWN],\n target=DESTROYED)\n def destroy(self):\n # TODO: add check for active connections before killing\n self._scheduler.destroy(self._job_id)\n\n @transition(field=state,\n source=[INITIALIZED, CREATED, DESTROYED],\n target=DESTROYED)\n def run(self, command):\n \"\"\"Run a one-off command\"\"\"\n rc, output = self._scheduler.run(self._job_id, self.release.image, command)\n return rc, output\n\n\n@python_2_unicode_compatible\nclass Push(UuidAuditedModel):\n \"\"\"\n Instance of a push used to trigger an application build\n \"\"\"\n owner = models.ForeignKey(settings.AUTH_USER_MODEL)\n app = models.ForeignKey('App')\n sha = models.CharField(max_length=40)\n\n fingerprint = 
models.CharField(max_length=255)\n receive_user = models.CharField(max_length=255)\n receive_repo = models.CharField(max_length=255)\n\n ssh_connection = models.CharField(max_length=255)\n ssh_original_command = models.CharField(max_length=255)\n\n class Meta:\n get_latest_by = 'created'\n ordering = ['-created']\n unique_together = (('app', 'uuid'),)\n\n def __str__(self):\n return \"{0}-{1}\".format(self.app.id, self.sha[:7])\n\n\n@python_2_unicode_compatible\nclass Build(UuidAuditedModel):\n \"\"\"\n Instance of a software build used by runtime nodes\n \"\"\"\n\n owner = models.ForeignKey(settings.AUTH_USER_MODEL)\n app = models.ForeignKey('App')\n image = models.CharField(max_length=256)\n\n class Meta:\n get_latest_by = 'created'\n ordering = ['-created']\n unique_together = (('app', 'uuid'),)\n\n def __str__(self):\n return \"{0}-{1}\".format(self.app.id, self.uuid[:7])\n\n\n@python_2_unicode_compatible\nclass Config(UuidAuditedModel):\n \"\"\"\n Set of configuration values applied as environment variables\n during runtime execution of the Application.\n \"\"\"\n\n owner = models.ForeignKey(settings.AUTH_USER_MODEL)\n app = models.ForeignKey('App')\n values = JSONField(default='{}', blank=True)\n\n class Meta:\n get_latest_by = 'created'\n ordering = ['-created']\n unique_together = (('app', 'uuid'),)\n\n def __str__(self):\n return \"{}-{}\".format(self.app.id, self.uuid[:7])\n\n\n@python_2_unicode_compatible\nclass Release(UuidAuditedModel):\n \"\"\"\n Software release deployed by the application platform\n\n Releases contain a :class:`Build` and a :class:`Config`.\n \"\"\"\n\n owner = models.ForeignKey(settings.AUTH_USER_MODEL)\n app = models.ForeignKey('App')\n version = models.PositiveIntegerField()\n summary = models.TextField(blank=True, null=True)\n\n config = models.ForeignKey('Config')\n build = models.ForeignKey('Build')\n # NOTE: image contains combined build + config, ready to run\n image = models.CharField(max_length=256)\n\n class Meta:\n get_latest_by = 'created'\n ordering = ['-created']\n unique_together = (('app', 'version'),)\n\n def __str__(self):\n return \"{0}-v{1}\".format(self.app.id, self.version)\n\n def new(self, user, config=None, build=None, summary=None):\n \"\"\"\n Create a new application release using the provided Build and Config\n on behalf of a user.\n\n Releases start at v1 and auto-increment.\n \"\"\"\n if not config:\n config = self.config\n if not build:\n build = self.build\n # prepare release tag\n new_version = self.version + 1\n tag = 'v{}'.format(new_version)\n image = build.image + ':{tag}'.format(**locals())\n # create new release and auto-increment version\n release = Release.objects.create(\n owner=user, app=self.app, config=config,\n build=build, version=new_version, image=image, summary=summary)\n # publish release to registry as new docker image\n repository_path = \"{}/{}\".format(user.username, self.app.id)\n publish_release(repository_path, config.values, tag)\n return release\n\n def previous(self):\n \"\"\"\n Return the previous Release to this one.\n\n :return: the previous :class:`Release`, or None\n \"\"\"\n releases = self.app.release_set\n if self.pk:\n releases = releases.exclude(pk=self.pk)\n try:\n # Get the Release previous to this one\n prev_release = releases.latest()\n except Release.DoesNotExist:\n prev_release = None\n return prev_release\n\n def save(self, *args, **kwargs):\n if not self.summary:\n self.summary = ''\n prev_release = self.previous()\n # compare this build to the previous build\n old_build = 
prev_release.build if prev_release else None\n # if the build changed, log it and who pushed it\n if self.build != old_build:\n self.summary += \"{} deployed {}\".format(self.build.owner, self.build.image)\n # compare this config to the previous config\n old_config = prev_release.config if prev_release else None\n # if the config data changed, log the dict diff\n if self.config != old_config:\n dict1 = self.config.values\n dict2 = old_config.values if old_config else {}\n diff = dict_diff(dict1, dict2)\n # try to be as succinct as possible\n added = ', '.join(k for k in diff.get('added', {}))\n added = 'added ' + added if added else ''\n changed = ', '.join(k for k in diff.get('changed', {}))\n changed = 'changed ' + changed if changed else ''\n deleted = ', '.join(k for k in diff.get('deleted', {}))\n deleted = 'deleted ' + deleted if deleted else ''\n changes = ', '.join(i for i in (added, changed, deleted) if i)\n if changes:\n if self.summary:\n self.summary += ' and '\n self.summary += \"{} {}\".format(self.config.owner, changes)\n if not self.summary:\n if self.version == 1:\n self.summary = \"{} created the initial release\".format(self.owner)\n else:\n self.summary = \"{} changed nothing\".format(self.owner)\n super(Release, self).save(*args, **kwargs)\n\n\n@python_2_unicode_compatible\nclass Key(UuidAuditedModel):\n \"\"\"An SSH public key.\"\"\"\n\n owner = models.ForeignKey(settings.AUTH_USER_MODEL)\n id = models.CharField(max_length=128)\n public = models.TextField(unique=True)\n\n class Meta:\n verbose_name = 'SSH Key'\n unique_together = (('owner', 'id'))\n\n def __str__(self):\n return \"{}...{}\".format(self.public[:18], self.public[-31:])\n\n\n# define update/delete callbacks for synchronizing\n# models with the configuration management backend\n\n\ndef _log_build_created(**kwargs):\n if kwargs.get('created'):\n build = kwargs['instance']\n log_event(build.app, \"Build {} created\".format(build))\n\n\ndef _log_release_created(**kwargs):\n if kwargs.get('created'):\n release = kwargs['instance']\n log_event(release.app, \"Release {} created\".format(release))\n\n\ndef _log_config_updated(**kwargs):\n config = kwargs['instance']\n log_event(config.app, \"Config {} updated\".format(config))\n\n\ndef _etcd_publish_key(**kwargs):\n key = kwargs['instance']\n _etcd_client.write('/deis/builder/users/{}/{}'.format(\n key.owner.username, fingerprint(key.public)), key.public)\n\n\ndef _etcd_purge_key(**kwargs):\n key = kwargs['instance']\n _etcd_client.delete('/deis/builder/users/{}/{}'.format(\n key.owner.username, fingerprint(key.public)))\n\n\ndef _etcd_purge_user(**kwargs):\n username = kwargs['instance'].username\n _etcd_client.delete('/deis/builder/users/{}'.format(username), dir=True, recursive=True)\n\n\n# Log significant app-related events\npost_save.connect(_log_build_created, sender=Build, dispatch_uid='api.models')\npost_save.connect(_log_release_created, sender=Release, dispatch_uid='api.models')\npost_save.connect(_log_config_updated, sender=Config, dispatch_uid='api.models')\n\n\n# save FSM transitions as they happen\ndef _save_transition(**kwargs):\n kwargs['instance'].save()\n\npost_transition.connect(_save_transition)\n\n# wire up etcd publishing if we can connect\ntry:\n _etcd_client = etcd.Client(host=settings.ETCD_HOST, port=int(settings.ETCD_PORT))\n _etcd_client.get('/deis')\nexcept etcd.EtcdException:\n logger.log(logging.WARNING, 'Cannot synchronize with etcd cluster')\n _etcd_client = None\n\nif _etcd_client:\n post_save.connect(_etcd_publish_key, 
sender=Key, dispatch_uid='api.models')\n post_delete.connect(_etcd_purge_key, sender=Key, dispatch_uid='api.models')\n post_delete.connect(_etcd_purge_user, sender=User, dispatch_uid='api.models')\n",
"path": "controller/api/models.py"
}
] | diff --git a/builder/templates/builder b/builder/templates/builder
index 95df25c43c..26c26c88eb 100755
--- a/builder/templates/builder
+++ b/builder/templates/builder
@@ -58,7 +58,9 @@ if __name__ == '__main__':
# check for Procfile
dockerfile = os.path.join(temp_dir, 'Dockerfile')
procfile = os.path.join(temp_dir, 'Procfile')
- if not os.path.exists(dockerfile) and os.path.exists(procfile):
+ if not os.path.exists(procfile):
+ raise Exception('Procfile must exist')
+ if not os.path.exists(dockerfile):
if os.path.exists('/buildpacks'):
build_cmd = "docker run -i -a stdin -v {cache_dir}:/tmp/cache:rw -v /buildpacks:/tmp/buildpacks deis/slugbuilder".format(**locals())
else:
diff --git a/controller/api/models.py b/controller/api/models.py
index 938e2563ed..71d2160f1a 100644
--- a/controller/api/models.py
+++ b/controller/api/models.py
@@ -257,7 +257,7 @@ def _get_scheduler(self):
def _get_command(self):
c_type = self.type
if c_type:
- return 'start {c_type}'
+ return "cat Procfile | grep ^{c_type} | cut -f 1 -d ' ' --complement | sh -"
else:
return ''
|
certbot__certbot-8776 | Fix lint and mypy with Python < 3.8
In https://github.com/certbot/certbot/pull/8748, we made a change that requires our lint and mypy tests to be run on Python 3.8+ in order to pass. See https://github.com/certbot/certbot/pull/8748#issuecomment-808790093 for the discussion of the problem.
I don't think we should do this. Certbot supports Python 3.6+, and I think it could cause a particularly bad experience for new devs who don't happen to know they need Python 3.8+. This change also broke our development Dockerfile, as can be seen at https://dev.azure.com/certbot/certbot/_build/results?buildId=3742&view=logs&j=bea2d267-f41e-5b33-7b51-a88065a8cbb0&t=0dc90756-6888-5ee6-5a6a-5855e6b9ae76&l=1873. Instead, I think we should change our approach here so the tests work on all versions of Python we support. I'm open to other ideas, but the two ideas I had for this are:
1. Just declare a runtime dependency on `typing-extensions`.
2. Add `typing-extensions` as a dev/test dependency and try to import it, but fall back to code similar to what we currently have if it's not available (see the sketch after this list).
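For concreteness, option 2 boils down to an import guard along these lines (a minimal sketch, assuming the 3.8-only feature in question is `typing.Protocol`; the module layout and names here are illustrative, not Certbot's actual code):

```python
# Sketch of option 2: prefer the stdlib, then the typing-extensions
# backport (installed via the dev/test extras), then a plain fallback.
import sys

if sys.version_info >= (3, 8):
    from typing import Protocol
else:  # Python 3.6 / 3.7
    try:
        from typing_extensions import Protocol
    except ImportError:
        # Keep runtime imports working even without the backport installed.
        Protocol = object  # type: ignore
```

This keeps `typing-extensions` out of the runtime install requirements while still giving lint/mypy environments the real `Protocol`.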
What do you think @adferrand? Are you interested in working on this?
| [
{
"content": "import codecs\nfrom distutils.version import LooseVersion\nimport os\nimport re\nimport sys\n\nfrom setuptools import __version__ as setuptools_version\nfrom setuptools import find_packages\nfrom setuptools import setup\n\nmin_setuptools_version='39.0.1'\n# This conditional isn't necessary, but it provides better error messages to\n# people who try to install this package with older versions of setuptools.\nif LooseVersion(setuptools_version) < LooseVersion(min_setuptools_version):\n raise RuntimeError(f'setuptools {min_setuptools_version}+ is required')\n\n# Workaround for https://bugs.python.org/issue8876, see\n# https://bugs.python.org/issue8876#msg208792\n# This can be removed when using Python 2.7.9 or later:\n# https://hg.python.org/cpython/raw-file/v2.7.9/Misc/NEWS\nif os.path.abspath(__file__).split(os.path.sep)[1] == 'vagrant':\n del os.link\n\n\ndef read_file(filename, encoding='utf8'):\n \"\"\"Read unicode from given file.\"\"\"\n with codecs.open(filename, encoding=encoding) as fd:\n return fd.read()\n\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\n# read version number (and other metadata) from package init\ninit_fn = os.path.join(here, 'certbot', '__init__.py')\nmeta = dict(re.findall(r\"\"\"__([a-z]+)__ = '([^']+)\"\"\", read_file(init_fn)))\n\nreadme = read_file(os.path.join(here, 'README.rst'))\nversion = meta['version']\n\n# This package relies on PyOpenSSL and requests, however, it isn't specified\n# here to avoid masking the more specific request requirements in acme. See\n# https://github.com/pypa/pip/issues/988 for more info.\ninstall_requires = [\n 'acme>=1.8.0',\n # We technically need ConfigArgParse 0.10.0 for Python 2.6 support, but\n # saying so here causes a runtime error against our temporary fork of 0.9.3\n # in which we added 2.6 support (see #2243), so we relax the requirement.\n 'ConfigArgParse>=0.9.3',\n 'configobj>=5.0.6',\n 'cryptography>=2.1.4',\n 'distro>=1.0.1',\n # 1.1.0+ is required to avoid the warnings described at\n # https://github.com/certbot/josepy/issues/13.\n 'josepy>=1.1.0',\n 'parsedatetime>=2.4',\n 'pyrfc3339',\n 'pytz',\n # This dependency needs to be added using environment markers to avoid its\n # installation on Linux.\n 'pywin32>=300 ; sys_platform == \"win32\"',\n f'setuptools>={min_setuptools_version}',\n 'zope.component',\n 'zope.interface',\n]\n\ndev_extras = [\n 'astroid',\n 'azure-devops',\n 'coverage',\n 'ipdb',\n 'mypy',\n 'PyGithub',\n # 1.1.0+ is required for poetry to use the poetry-core library for the\n # build system declared in tools/pinning/pyproject.toml.\n 'poetry>=1.1.0',\n 'pylint',\n 'pytest',\n 'pytest-cov',\n 'pytest-xdist',\n 'tox',\n 'twine',\n 'wheel',\n]\n\ndocs_extras = [\n # If you have Sphinx<1.5.1, you need docutils<0.13.1\n # https://github.com/sphinx-doc/sphinx/issues/3212\n 'repoze.sphinx.autointerface',\n 'Sphinx>=1.2', # Annotation support\n 'sphinx_rtd_theme',\n]\n\nsetup(\n name='certbot',\n version=version,\n description=\"ACME client\",\n long_description=readme,\n url='https://github.com/letsencrypt/letsencrypt',\n author=\"Certbot Project\",\n author_email='[email protected]',\n license='Apache License 2.0',\n python_requires='>=3.6',\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Console',\n 'Environment :: Console :: Curses',\n 'Intended Audience :: System Administrators',\n 'License :: OSI Approved :: Apache Software License',\n 'Operating System :: POSIX :: Linux',\n 'Programming Language :: Python',\n 'Programming Language :: 
Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n 'Topic :: Internet :: WWW/HTTP',\n 'Topic :: Security',\n 'Topic :: System :: Installation/Setup',\n 'Topic :: System :: Networking',\n 'Topic :: System :: Systems Administration',\n 'Topic :: Utilities',\n ],\n\n packages=find_packages(exclude=['docs', 'examples', 'tests', 'venv']),\n include_package_data=True,\n\n install_requires=install_requires,\n extras_require={\n 'dev': dev_extras,\n 'docs': docs_extras,\n },\n\n entry_points={\n 'console_scripts': [\n 'certbot = certbot.main:main',\n ],\n 'certbot.plugins': [\n 'manual = certbot._internal.plugins.manual:Authenticator',\n 'null = certbot._internal.plugins.null:Installer',\n 'standalone = certbot._internal.plugins.standalone:Authenticator',\n 'webroot = certbot._internal.plugins.webroot:Authenticator',\n ],\n },\n)\n",
"path": "certbot/setup.py"
}
] | [
{
"content": "import codecs\nfrom distutils.version import LooseVersion\nimport os\nimport re\nimport sys\n\nfrom setuptools import __version__ as setuptools_version\nfrom setuptools import find_packages\nfrom setuptools import setup\n\nmin_setuptools_version='39.0.1'\n# This conditional isn't necessary, but it provides better error messages to\n# people who try to install this package with older versions of setuptools.\nif LooseVersion(setuptools_version) < LooseVersion(min_setuptools_version):\n raise RuntimeError(f'setuptools {min_setuptools_version}+ is required')\n\n# Workaround for https://bugs.python.org/issue8876, see\n# https://bugs.python.org/issue8876#msg208792\n# This can be removed when using Python 2.7.9 or later:\n# https://hg.python.org/cpython/raw-file/v2.7.9/Misc/NEWS\nif os.path.abspath(__file__).split(os.path.sep)[1] == 'vagrant':\n del os.link\n\n\ndef read_file(filename, encoding='utf8'):\n \"\"\"Read unicode from given file.\"\"\"\n with codecs.open(filename, encoding=encoding) as fd:\n return fd.read()\n\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\n# read version number (and other metadata) from package init\ninit_fn = os.path.join(here, 'certbot', '__init__.py')\nmeta = dict(re.findall(r\"\"\"__([a-z]+)__ = '([^']+)\"\"\", read_file(init_fn)))\n\nreadme = read_file(os.path.join(here, 'README.rst'))\nversion = meta['version']\n\n# This package relies on PyOpenSSL and requests, however, it isn't specified\n# here to avoid masking the more specific request requirements in acme. See\n# https://github.com/pypa/pip/issues/988 for more info.\ninstall_requires = [\n 'acme>=1.8.0',\n # We technically need ConfigArgParse 0.10.0 for Python 2.6 support, but\n # saying so here causes a runtime error against our temporary fork of 0.9.3\n # in which we added 2.6 support (see #2243), so we relax the requirement.\n 'ConfigArgParse>=0.9.3',\n 'configobj>=5.0.6',\n 'cryptography>=2.1.4',\n 'distro>=1.0.1',\n # 1.1.0+ is required to avoid the warnings described at\n # https://github.com/certbot/josepy/issues/13.\n 'josepy>=1.1.0',\n 'parsedatetime>=2.4',\n 'pyrfc3339',\n 'pytz',\n # This dependency needs to be added using environment markers to avoid its\n # installation on Linux.\n 'pywin32>=300 ; sys_platform == \"win32\"',\n f'setuptools>={min_setuptools_version}',\n 'zope.component',\n 'zope.interface',\n]\n\ndev_extras = [\n 'astroid',\n 'azure-devops',\n 'coverage',\n 'ipdb',\n 'mypy',\n 'PyGithub',\n # 1.1.0+ is required for poetry to use the poetry-core library for the\n # build system declared in tools/pinning/pyproject.toml.\n 'poetry>=1.1.0',\n 'pylint',\n 'pytest',\n 'pytest-cov',\n 'pytest-xdist',\n # typing-extensions is required to import typing.Protocol and make the mypy checks\n # pass (along with pylint about non-existent objects) on Python 3.6 & 3.7\n 'typing-extensions',\n 'tox',\n 'twine',\n 'wheel',\n]\n\ndocs_extras = [\n # If you have Sphinx<1.5.1, you need docutils<0.13.1\n # https://github.com/sphinx-doc/sphinx/issues/3212\n 'repoze.sphinx.autointerface',\n 'Sphinx>=1.2', # Annotation support\n 'sphinx_rtd_theme',\n]\n\nsetup(\n name='certbot',\n version=version,\n description=\"ACME client\",\n long_description=readme,\n url='https://github.com/letsencrypt/letsencrypt',\n author=\"Certbot Project\",\n author_email='[email protected]',\n license='Apache License 2.0',\n python_requires='>=3.6',\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Console',\n 'Environment :: Console :: Curses',\n 'Intended Audience :: 
System Administrators',\n 'License :: OSI Approved :: Apache Software License',\n 'Operating System :: POSIX :: Linux',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n 'Topic :: Internet :: WWW/HTTP',\n 'Topic :: Security',\n 'Topic :: System :: Installation/Setup',\n 'Topic :: System :: Networking',\n 'Topic :: System :: Systems Administration',\n 'Topic :: Utilities',\n ],\n\n packages=find_packages(exclude=['docs', 'examples', 'tests', 'venv']),\n include_package_data=True,\n\n install_requires=install_requires,\n extras_require={\n 'dev': dev_extras,\n 'docs': docs_extras,\n },\n\n entry_points={\n 'console_scripts': [\n 'certbot = certbot.main:main',\n ],\n 'certbot.plugins': [\n 'manual = certbot._internal.plugins.manual:Authenticator',\n 'null = certbot._internal.plugins.null:Installer',\n 'standalone = certbot._internal.plugins.standalone:Authenticator',\n 'webroot = certbot._internal.plugins.webroot:Authenticator',\n ],\n },\n)\n",
"path": "certbot/setup.py"
}
] | diff --git a/certbot/certbot/plugins/dns_test_common.py b/certbot/certbot/plugins/dns_test_common.py
index f6d7bdca8cf..7a8df9329da 100644
--- a/certbot/certbot/plugins/dns_test_common.py
+++ b/certbot/certbot/plugins/dns_test_common.py
@@ -12,7 +12,7 @@
from certbot.tests import util as test_util
if typing.TYPE_CHECKING:
- from typing import Protocol
+ from typing_extensions import Protocol
else:
Protocol = object # type: ignore
diff --git a/certbot/certbot/plugins/dns_test_common_lexicon.py b/certbot/certbot/plugins/dns_test_common_lexicon.py
index 48261d39599..5c6f09d208d 100644
--- a/certbot/certbot/plugins/dns_test_common_lexicon.py
+++ b/certbot/certbot/plugins/dns_test_common_lexicon.py
@@ -18,7 +18,7 @@
except ImportError: # pragma: no cover
from unittest import mock # type: ignore
if typing.TYPE_CHECKING:
- from typing import Protocol
+ from typing_extensions import Protocol
else:
Protocol = object # type: ignore
diff --git a/certbot/setup.py b/certbot/setup.py
index 8843a35a21e..6913d8384c7 100644
--- a/certbot/setup.py
+++ b/certbot/setup.py
@@ -77,6 +77,9 @@ def read_file(filename, encoding='utf8'):
'pytest',
'pytest-cov',
'pytest-xdist',
+ # typing-extensions is required to import typing.Protocol and make the mypy checks
+ # pass (along with pylint about non-existent objects) on Python 3.6 & 3.7
+ 'typing-extensions',
'tox',
'twine',
'wheel',
diff --git a/tools/requirements.txt b/tools/requirements.txt
index 707cc64b9d1..e5a6d3be737 100644
--- a/tools/requirements.txt
+++ b/tools/requirements.txt
@@ -18,8 +18,8 @@ backcall==0.2.0
bcrypt==3.2.0; python_version >= "3.6" and python_full_version < "3.0.0" or python_full_version >= "3.5.0" and python_version >= "3.6"
beautifulsoup4==4.9.3; python_version >= "3.6" and python_version < "4.0"
bleach==3.3.0; python_version >= "3.6" and python_full_version < "3.0.0" or python_version >= "3.6" and python_full_version >= "3.5.0"
-boto3==1.17.42; python_version >= "3.6" and python_full_version < "3.0.0" or python_full_version >= "3.6.0" and python_version >= "3.6"
-botocore==1.20.42; python_version >= "3.6" and python_full_version < "3.0.0" or python_full_version >= "3.6.0" and python_version >= "3.6"
+boto3==1.17.44; python_version >= "3.6" and python_full_version < "3.0.0" or python_full_version >= "3.6.0" and python_version >= "3.6"
+botocore==1.20.44; python_version >= "3.6" and python_full_version < "3.0.0" or python_full_version >= "3.6.0" and python_version >= "3.6"
cachecontrol==0.12.6; python_version >= "3.6" and python_full_version < "3.0.0" or python_version >= "3.6" and python_full_version >= "3.5.0"
cached-property==1.5.2; python_version >= "3.6" and python_full_version < "3.0.0" or python_full_version >= "3.4.0" and python_version >= "3.6"
cachetools==4.2.1; python_version >= "3.5" and python_version < "4.0" and (python_version >= "3.6" and python_full_version < "3.0.0" or python_full_version >= "3.6.0" and python_version >= "3.6")
@@ -36,7 +36,7 @@ configobj==5.0.6; python_version >= "3.6"
coverage==4.5.4; (python_version >= "2.6" and python_full_version < "3.0.0") or (python_full_version >= "3.3.0" and python_version < "4")
crashtest==0.3.1; python_version >= "3.6" and python_version < "4.0" and (python_version >= "3.6" and python_full_version < "3.0.0" or python_version >= "3.6" and python_full_version >= "3.5.0")
cryptography==3.4.7; python_version >= "3.6" and python_full_version < "3.0.0" and python_version < "4.0" and (python_version >= "3.6" and python_full_version < "3.0.0" or python_version >= "3.6" and python_full_version >= "3.5.0") and sys_platform == "linux" or python_full_version >= "3.5.0" and python_version >= "3.6" and python_version < "4.0" and (python_version >= "3.6" and python_full_version < "3.0.0" or python_version >= "3.6" and python_full_version >= "3.5.0") and sys_platform == "linux"
-decorator==4.4.2; python_version == "3.6" and python_full_version < "3.0.0" or python_version == "3.6" and python_full_version >= "3.2.0"
+decorator==5.0.5
deprecated==1.2.12; python_version >= "3.6" and python_full_version < "3.0.0" or python_version >= "3.6" and python_full_version >= "3.4.0"
distlib==0.3.1; python_version >= "3.6" and python_full_version < "3.0.0" or python_version >= "3.6" and python_full_version >= "3.5.0"
distro==1.5.0; python_version >= "3.6" and python_full_version < "3.0.0" or python_full_version >= "3.4.0" and python_version >= "3.6"
@@ -99,7 +99,7 @@ ply==3.11; python_version >= "3.6"
poetry-core==1.0.2; python_version >= "3.6" and python_full_version < "3.0.0" or python_version >= "3.6" and python_full_version >= "3.5.0"
poetry==1.1.5; python_version >= "3.6" and python_full_version < "3.0.0" or python_version >= "3.6" and python_full_version >= "3.5.0"
prompt-toolkit==3.0.3
-protobuf==3.15.6; python_version >= "3.6" and python_full_version < "3.0.0" or python_full_version >= "3.6.0" and python_version >= "3.6"
+protobuf==3.15.7; python_version >= "3.6" and python_full_version < "3.0.0" or python_full_version >= "3.6.0" and python_version >= "3.6"
ptyprocess==0.7.0; python_version >= "3.6" and python_full_version < "3.0.0" or python_version >= "3.6" and python_full_version >= "3.5.0"
py==1.10.0; python_version >= "3.6" and python_full_version < "3.0.0" or python_full_version >= "3.5.0" and python_version >= "3.6"
pyasn1-modules==0.2.8; python_version >= "3.6" and python_full_version < "3.0.0" or python_full_version >= "3.6.0" and python_version >= "3.6"
@@ -124,7 +124,7 @@ pytest==3.2.5
python-augeas==0.5.0
python-dateutil==2.8.1; python_version >= "3.6" and python_full_version < "3.0.0" or python_full_version >= "3.6.0" and python_version >= "3.6"
python-digitalocean==1.16.0; python_version >= "3.6"
-python-dotenv==0.16.0; python_version >= "3.6" and python_full_version < "3.0.0" or python_full_version >= "3.4.0" and python_version >= "3.6"
+python-dotenv==0.17.0; python_version >= "3.6" and python_full_version < "3.0.0" or python_full_version >= "3.4.0" and python_version >= "3.6"
pytz==2021.1; python_version >= "3.6" and python_full_version < "3.0.0" or python_version >= "3.6" and python_full_version >= "3.6.0"
pywin32-ctypes==0.2.0; python_version >= "3.6" and python_version < "4.0" and (python_version >= "3.6" and python_full_version < "3.0.0" or python_version >= "3.6" and python_full_version >= "3.5.0") and sys_platform == "win32"
pywin32==300; sys_platform == "win32" and python_version >= "3.6" and (python_version >= "3.6" and python_full_version < "3.0.0" or python_full_version >= "3.5.0" and python_version >= "3.6")
@@ -144,7 +144,7 @@ shellingham==1.4.0; python_version >= "3.6" and python_full_version < "3.0.0" or
six==1.15.0; python_version >= "3.6" and python_full_version < "3.0.0" or python_full_version >= "3.5.0" and python_version >= "3.6"
snowballstemmer==2.1.0; python_version >= "3.6"
soupsieve==2.2.1; python_version >= "3.6"
-sphinx-rtd-theme==0.5.1; python_version >= "3.6"
+sphinx-rtd-theme==0.5.2; python_version >= "3.6"
sphinx==3.5.3; python_version >= "3.6"
sphinxcontrib-applehelp==1.0.2; python_version >= "3.6"
sphinxcontrib-devhelp==1.0.2; python_version >= "3.6"
|
huggingface__diffusers-1149 | [Flax] 🚨 0.7.0 not working 🚨
### Describe the bug

### System Info
TPU v3-8
| [
{
"content": "# Copyright 2022 The HuggingFace Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport math\n\nimport flax.linen as nn\nimport jax.numpy as jnp\n\n\ndef get_sinusoidal_embeddings(\n timesteps: jnp.ndarray,\n embedding_dim: int,\n freq_shift: float = 1,\n min_timescale: float = 1,\n max_timescale: float = 1.0e4,\n flip_sin_to_cos: bool = False,\n scale: float = 1.0,\n) -> jnp.ndarray:\n \"\"\"Returns the positional encoding (same as Tensor2Tensor).\n Args:\n timesteps: a 1-D Tensor of N indices, one per batch element.\n These may be fractional.\n embedding_dim: The number of output channels.\n min_timescale: The smallest time unit (should probably be 0.0).\n max_timescale: The largest time unit.\n Returns:\n a Tensor of timing signals [N, num_channels]\n \"\"\"\n assert timesteps.ndim == 1, \"Timesteps should be a 1d-array\"\n assert embedding_dim % 2 == 0, f\"Embedding dimension {embedding_dim} should be even\"\n num_timescales = float(embedding_dim // 2)\n log_timescale_increment = math.log(max_timescale / min_timescale) / (num_timescales - freq_shift)\n inv_timescales = min_timescale * jnp.exp(jnp.arange(num_timescales, dtype=jnp.float32) * -log_timescale_increment)\n emb = jnp.expand_dims(timesteps, 1) * jnp.expand_dims(inv_timescales, 0)\n\n # scale embeddings\n scaled_time = scale * emb\n\n if flip_sin_to_cos:\n signal = jnp.concatenate([jnp.cos(scaled_time), jnp.sin(scaled_time)], axis=1)\n else:\n signal = jnp.concatenate([jnp.sin(scaled_time), jnp.cos(scaled_time)], axis=1)\n signal = jnp.reshape(signal, [jnp.shape(timesteps)[0], embedding_dim])\n return signal\n\n\nclass FlaxTimestepEmbedding(nn.Module):\n r\"\"\"\n Time step Embedding Module. Learns embeddings for input time steps.\n\n Args:\n time_embed_dim (`int`, *optional*, defaults to `32`):\n Time step embedding dimension\n dtype (:obj:`jnp.dtype`, *optional*, defaults to jnp.float32):\n Parameters `dtype`\n \"\"\"\n time_embed_dim: int = 32\n dtype: jnp.dtype = jnp.float32\n\n @nn.compact\n def __call__(self, temb):\n temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name=\"linear_1\")(temb)\n temb = nn.silu(temb)\n temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name=\"linear_2\")(temb)\n return temb\n\n\nclass FlaxTimesteps(nn.Module):\n r\"\"\"\n Wrapper Module for sinusoidal Time step Embeddings as described in https://arxiv.org/abs/2006.11239\n\n Args:\n dim (`int`, *optional*, defaults to `32`):\n Time step embedding dimension\n \"\"\"\n dim: int = 32\n freq_shift: float = 1\n\n @nn.compact\n def __call__(self, timesteps):\n return get_sinusoidal_embeddings(timesteps, embedding_dim=self.dim, freq_shift=self.freq_shift)\n",
"path": "src/diffusers/models/embeddings_flax.py"
}
] | [
{
"content": "# Copyright 2022 The HuggingFace Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport math\n\nimport flax.linen as nn\nimport jax.numpy as jnp\n\n\ndef get_sinusoidal_embeddings(\n timesteps: jnp.ndarray,\n embedding_dim: int,\n freq_shift: float = 1,\n min_timescale: float = 1,\n max_timescale: float = 1.0e4,\n flip_sin_to_cos: bool = False,\n scale: float = 1.0,\n) -> jnp.ndarray:\n \"\"\"Returns the positional encoding (same as Tensor2Tensor).\n Args:\n timesteps: a 1-D Tensor of N indices, one per batch element.\n These may be fractional.\n embedding_dim: The number of output channels.\n min_timescale: The smallest time unit (should probably be 0.0).\n max_timescale: The largest time unit.\n Returns:\n a Tensor of timing signals [N, num_channels]\n \"\"\"\n assert timesteps.ndim == 1, \"Timesteps should be a 1d-array\"\n assert embedding_dim % 2 == 0, f\"Embedding dimension {embedding_dim} should be even\"\n num_timescales = float(embedding_dim // 2)\n log_timescale_increment = math.log(max_timescale / min_timescale) / (num_timescales - freq_shift)\n inv_timescales = min_timescale * jnp.exp(jnp.arange(num_timescales, dtype=jnp.float32) * -log_timescale_increment)\n emb = jnp.expand_dims(timesteps, 1) * jnp.expand_dims(inv_timescales, 0)\n\n # scale embeddings\n scaled_time = scale * emb\n\n if flip_sin_to_cos:\n signal = jnp.concatenate([jnp.cos(scaled_time), jnp.sin(scaled_time)], axis=1)\n else:\n signal = jnp.concatenate([jnp.sin(scaled_time), jnp.cos(scaled_time)], axis=1)\n signal = jnp.reshape(signal, [jnp.shape(timesteps)[0], embedding_dim])\n return signal\n\n\nclass FlaxTimestepEmbedding(nn.Module):\n r\"\"\"\n Time step Embedding Module. Learns embeddings for input time steps.\n\n Args:\n time_embed_dim (`int`, *optional*, defaults to `32`):\n Time step embedding dimension\n dtype (:obj:`jnp.dtype`, *optional*, defaults to jnp.float32):\n Parameters `dtype`\n \"\"\"\n time_embed_dim: int = 32\n dtype: jnp.dtype = jnp.float32\n\n @nn.compact\n def __call__(self, temb):\n temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name=\"linear_1\")(temb)\n temb = nn.silu(temb)\n temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name=\"linear_2\")(temb)\n return temb\n\n\nclass FlaxTimesteps(nn.Module):\n r\"\"\"\n Wrapper Module for sinusoidal Time step Embeddings as described in https://arxiv.org/abs/2006.11239\n\n Args:\n dim (`int`, *optional*, defaults to `32`):\n Time step embedding dimension\n \"\"\"\n dim: int = 32\n freq_shift: float = 1\n\n @nn.compact\n def __call__(self, timesteps):\n return get_sinusoidal_embeddings(\n timesteps, embedding_dim=self.dim, freq_shift=self.freq_shift, flip_sin_to_cos=True\n )\n",
"path": "src/diffusers/models/embeddings_flax.py"
}
] | diff --git a/src/diffusers/models/embeddings_flax.py b/src/diffusers/models/embeddings_flax.py
index 1e2272c1fe70..bf7d54b82ec2 100644
--- a/src/diffusers/models/embeddings_flax.py
+++ b/src/diffusers/models/embeddings_flax.py
@@ -88,4 +88,6 @@ class FlaxTimesteps(nn.Module):
@nn.compact
def __call__(self, timesteps):
- return get_sinusoidal_embeddings(timesteps, embedding_dim=self.dim, freq_shift=self.freq_shift)
+ return get_sinusoidal_embeddings(
+ timesteps, embedding_dim=self.dim, freq_shift=self.freq_shift, flip_sin_to_cos=True
+ )
|
google__openhtf-1112 | Unused `six` import in monitor code
In `openhtf/core/monitors.py`, it looks like there is an unused import of the `six` module:
https://github.com/google/openhtf/blob/c85fb069a1ce407e82bb47a8fb1b64220e974c5f/openhtf/core/monitors.py#L58
If that import is in fact not needed, it should be deleted.
| [
{
"content": "# Copyright 2014 Google Inc. All Rights Reserved.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Monitors provide a mechanism for periodically collecting a measurement.\n\nMonitors are implemented similar to phase functions - they are decorated\nwith plugs.plug() to pass plugs in. The return value of a monitor\nfunction, however, will be used to append a value to a measurement.\n\nMonitors by default poll at a rate of 1 second between invocations of\nthe monitor function. The poll interval (given in milliseconds) determines the\napproximate frequency at which values will be sampled. A sample is considered\nto have been taken at the time when the monitor function *returns*, not when\nit is called.\n\nThe approximate average duration of calls to the monitor function is taken into\naccount, so that samples are obtained on as close to interval_ms boundaries as\ncan be. A poll interval of 0 will cause the monitor function to be called in a\ntight loop with no delays.\n\nExample:\n\[email protected](current_meter=current_meter.CurrentMeter)\ndef CurrentMonitor(test, current_meter):\n return current_meter.GetReading()\n\[email protected]('current_draw', CurrentMonitor, units=units.AMPERE)\ndef MyPhase(test):\n # Do some stuff for a while...\n\n# MyPhase will have a dimensioned measurement on it, with units of 'AMPERE' and\n# a single dimension of 'MILLISECONDS', and will have values for roughly every\n# second while MyPhase was executing.\n\"\"\"\n\nimport functools\nimport inspect\nimport time\nfrom typing import Any, Callable, Dict, Optional, Text\n\nimport openhtf\nfrom openhtf import plugs\nfrom openhtf.core import measurements\nfrom openhtf.core import phase_descriptor\nfrom openhtf.core import test_state as core_test_state\nfrom openhtf.util import threads\nfrom openhtf.util import units as uom\nimport six\n\n\nclass _MonitorThread(threads.KillableThread):\n \"\"\"Background thread that runs a monitor.\"\"\"\n\n daemon = True\n\n def __init__(self, measurement_name: Text,\n monitor_desc: phase_descriptor.PhaseDescriptor,\n extra_kwargs: Dict[Any, Any],\n test_state: core_test_state.TestState, interval_ms: int):\n super(_MonitorThread,\n self).__init__(name='%s_MonitorThread' % measurement_name)\n self.measurement_name = measurement_name\n self.monitor_desc = monitor_desc\n self.test_state = test_state\n self.interval_ms = interval_ms\n self.extra_kwargs = extra_kwargs\n\n def get_value(self) -> Any:\n argspec = inspect.getfullargspec(self.monitor_desc.func)\n argspec_args = argspec.args\n argspec_keywords = argspec.varkw\n if argspec_keywords:\n # Monitor phase takes **kwargs, so just pass everything in.\n kwargs = self.extra_kwargs\n else:\n # Only pass in args that the monitor phase takes.\n kwargs = {\n arg: val for arg, val in self.extra_kwargs if arg in argspec_args\n }\n return self.monitor_desc.with_args(**kwargs)(self.test_state)\n\n def _thread_proc(self):\n measurement = getattr(self.test_state.test_api.measurements,\n self.measurement_name)\n start_time = 
time.time()\n\n # Special case tight-loop monitoring.\n if not self.interval_ms:\n while True:\n measurement[(time.time() - start_time) * 1000] = self.get_value()\n\n # Helper to take sample, return sample number and sample duration.\n def _take_sample():\n pre_time, value, post_time = time.time(), self.get_value(), time.time()\n measurement[(post_time - start_time) * 1000] = value\n return (int((post_time - start_time) * 1000 / self.interval_ms),\n (post_time - pre_time) * 1000)\n\n # Track the last sample number, and an approximation of the mean time\n # it takes to sample (so we can account for it in how long we sleep).\n last_sample, mean_sample_ms = _take_sample()\n while True:\n # Find what sample number (float) we would be on if we sampled now.\n current_time = time.time()\n new_sample = ((((current_time - start_time) * 1000) + mean_sample_ms) /\n self.interval_ms)\n if new_sample < last_sample + 1:\n time.sleep(start_time - current_time +\n ((last_sample + 1) * self.interval_ms / 1000.0) -\n (mean_sample_ms / 1000.0))\n continue\n elif new_sample > last_sample + 2:\n self.test_state.state_logger.warning(\n 'Monitor for \"%s\" skipping %s sample(s).', self.measurement_name,\n new_sample - last_sample - 1)\n last_sample, cur_sample_ms = _take_sample()\n # Approximate 10-element sliding window average.\n mean_sample_ms = ((9 * mean_sample_ms) + cur_sample_ms) / 10.0\n\n\ndef monitors(\n measurement_name: Text,\n monitor_func: phase_descriptor.PhaseT,\n units: Optional[uom.UnitDescriptor] = None,\n poll_interval_ms: int = 1000\n) -> Callable[[phase_descriptor.PhaseT], phase_descriptor.PhaseDescriptor]:\n \"\"\"Returns a decorator that wraps a phase with a monitor.\"\"\"\n monitor_desc = openhtf.PhaseDescriptor.wrap_or_copy(monitor_func)\n\n def wrapper(\n phase_func: phase_descriptor.PhaseT) -> phase_descriptor.PhaseDescriptor:\n phase_desc = openhtf.PhaseDescriptor.wrap_or_copy(phase_func)\n\n # Re-key this dict so we don't have to worry about collisions with\n # plug.plug() decorators on the phase function. Since we aren't\n # updating kwargs here, we don't have to worry about collisions with\n # kwarg names.\n monitor_plugs = {('_' * idx) + measurement_name + '_monitor': plug.cls\n for idx, plug in enumerate(monitor_desc.plugs, start=1)}\n\n @openhtf.PhaseOptions(requires_state=True)\n @plugs.plug(update_kwargs=False, **monitor_plugs)\n @openhtf.measures(\n measurements.Measurement(measurement_name).with_units(\n units).with_dimensions(uom.MILLISECOND))\n @functools.wraps(phase_desc.func)\n def monitored_phase_func(test_state, *args, **kwargs):\n # Start monitor thread, it will run monitor_desc periodically.\n monitor_thread = _MonitorThread(measurement_name, monitor_desc,\n phase_desc.extra_kwargs, test_state,\n poll_interval_ms)\n monitor_thread.start()\n try:\n return phase_desc(test_state, *args, **kwargs)\n finally:\n monitor_thread.kill()\n monitor_thread.join()\n\n return monitored_phase_func\n\n return wrapper\n",
"path": "openhtf/core/monitors.py"
}
] | [
{
"content": "# Copyright 2014 Google Inc. All Rights Reserved.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Monitors provide a mechanism for periodically collecting a measurement.\n\nMonitors are implemented similar to phase functions - they are decorated\nwith plugs.plug() to pass plugs in. The return value of a monitor\nfunction, however, will be used to append a value to a measurement.\n\nMonitors by default poll at a rate of 1 second between invocations of\nthe monitor function. The poll interval (given in milliseconds) determines the\napproximate frequency at which values will be sampled. A sample is considered\nto have been taken at the time when the monitor function *returns*, not when\nit is called.\n\nThe approximate average duration of calls to the monitor function is taken into\naccount, so that samples are obtained on as close to interval_ms boundaries as\ncan be. A poll interval of 0 will cause the monitor function to be called in a\ntight loop with no delays.\n\nExample:\n\[email protected](current_meter=current_meter.CurrentMeter)\ndef CurrentMonitor(test, current_meter):\n return current_meter.GetReading()\n\[email protected]('current_draw', CurrentMonitor, units=units.AMPERE)\ndef MyPhase(test):\n # Do some stuff for a while...\n\n# MyPhase will have a dimensioned measurement on it, with units of 'AMPERE' and\n# a single dimension of 'MILLISECONDS', and will have values for roughly every\n# second while MyPhase was executing.\n\"\"\"\n\nimport functools\nimport inspect\nimport time\nfrom typing import Any, Callable, Dict, Optional, Text\n\nimport openhtf\nfrom openhtf import plugs\nfrom openhtf.core import measurements\nfrom openhtf.core import phase_descriptor\nfrom openhtf.core import test_state as core_test_state\nfrom openhtf.util import threads\nfrom openhtf.util import units as uom\n\n\nclass _MonitorThread(threads.KillableThread):\n \"\"\"Background thread that runs a monitor.\"\"\"\n\n daemon = True\n\n def __init__(self, measurement_name: Text,\n monitor_desc: phase_descriptor.PhaseDescriptor,\n extra_kwargs: Dict[Any, Any],\n test_state: core_test_state.TestState, interval_ms: int):\n super(_MonitorThread,\n self).__init__(name='%s_MonitorThread' % measurement_name)\n self.measurement_name = measurement_name\n self.monitor_desc = monitor_desc\n self.test_state = test_state\n self.interval_ms = interval_ms\n self.extra_kwargs = extra_kwargs\n\n def get_value(self) -> Any:\n argspec = inspect.getfullargspec(self.monitor_desc.func)\n argspec_args = argspec.args\n argspec_keywords = argspec.varkw\n if argspec_keywords:\n # Monitor phase takes **kwargs, so just pass everything in.\n kwargs = self.extra_kwargs\n else:\n # Only pass in args that the monitor phase takes.\n kwargs = {\n arg: val for arg, val in self.extra_kwargs if arg in argspec_args\n }\n return self.monitor_desc.with_args(**kwargs)(self.test_state)\n\n def _thread_proc(self):\n measurement = getattr(self.test_state.test_api.measurements,\n self.measurement_name)\n start_time = time.time()\n\n # 
Special case tight-loop monitoring.\n if not self.interval_ms:\n while True:\n measurement[(time.time() - start_time) * 1000] = self.get_value()\n\n # Helper to take sample, return sample number and sample duration.\n def _take_sample():\n pre_time, value, post_time = time.time(), self.get_value(), time.time()\n measurement[(post_time - start_time) * 1000] = value\n return (int((post_time - start_time) * 1000 / self.interval_ms),\n (post_time - pre_time) * 1000)\n\n # Track the last sample number, and an approximation of the mean time\n # it takes to sample (so we can account for it in how long we sleep).\n last_sample, mean_sample_ms = _take_sample()\n while True:\n # Find what sample number (float) we would be on if we sampled now.\n current_time = time.time()\n new_sample = ((((current_time - start_time) * 1000) + mean_sample_ms) /\n self.interval_ms)\n if new_sample < last_sample + 1:\n time.sleep(start_time - current_time +\n ((last_sample + 1) * self.interval_ms / 1000.0) -\n (mean_sample_ms / 1000.0))\n continue\n elif new_sample > last_sample + 2:\n self.test_state.state_logger.warning(\n 'Monitor for \"%s\" skipping %s sample(s).', self.measurement_name,\n new_sample - last_sample - 1)\n last_sample, cur_sample_ms = _take_sample()\n # Approximate 10-element sliding window average.\n mean_sample_ms = ((9 * mean_sample_ms) + cur_sample_ms) / 10.0\n\n\ndef monitors(\n measurement_name: Text,\n monitor_func: phase_descriptor.PhaseT,\n units: Optional[uom.UnitDescriptor] = None,\n poll_interval_ms: int = 1000\n) -> Callable[[phase_descriptor.PhaseT], phase_descriptor.PhaseDescriptor]:\n \"\"\"Returns a decorator that wraps a phase with a monitor.\"\"\"\n monitor_desc = openhtf.PhaseDescriptor.wrap_or_copy(monitor_func)\n\n def wrapper(\n phase_func: phase_descriptor.PhaseT) -> phase_descriptor.PhaseDescriptor:\n phase_desc = openhtf.PhaseDescriptor.wrap_or_copy(phase_func)\n\n # Re-key this dict so we don't have to worry about collisions with\n # plug.plug() decorators on the phase function. Since we aren't\n # updating kwargs here, we don't have to worry about collisions with\n # kwarg names.\n monitor_plugs = {('_' * idx) + measurement_name + '_monitor': plug.cls\n for idx, plug in enumerate(monitor_desc.plugs, start=1)}\n\n @openhtf.PhaseOptions(requires_state=True)\n @plugs.plug(update_kwargs=False, **monitor_plugs)\n @openhtf.measures(\n measurements.Measurement(measurement_name).with_units(\n units).with_dimensions(uom.MILLISECOND))\n @functools.wraps(phase_desc.func)\n def monitored_phase_func(test_state, *args, **kwargs):\n # Start monitor thread, it will run monitor_desc periodically.\n monitor_thread = _MonitorThread(measurement_name, monitor_desc,\n phase_desc.extra_kwargs, test_state,\n poll_interval_ms)\n monitor_thread.start()\n try:\n return phase_desc(test_state, *args, **kwargs)\n finally:\n monitor_thread.kill()\n monitor_thread.join()\n\n return monitored_phase_func\n\n return wrapper\n",
"path": "openhtf/core/monitors.py"
}
] | diff --git a/openhtf/core/monitors.py b/openhtf/core/monitors.py
index 02cc60d1a..2d16ef15d 100644
--- a/openhtf/core/monitors.py
+++ b/openhtf/core/monitors.py
@@ -55,7 +55,6 @@ def MyPhase(test):
from openhtf.core import test_state as core_test_state
from openhtf.util import threads
from openhtf.util import units as uom
-import six
class _MonitorThread(threads.KillableThread):
|
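The record above removes `import six` from `openhtf/core/monitors.py` as part of dropping Python 2 compatibility; the module itself provides the `monitors()` decorator described in its docstring, which runs a monitor function on a background thread and appends each return value as a dimensioned sample. As a runnable sketch of that usage, the following fleshes out the docstring's current-meter example; `CurrentMeter` and its `get_reading()` method are made-up stand-ins, while `monitors`, `plugs.plug`, and `units.AMPERE` come from the module and its imports above.

```python
# Sketch only: CurrentMeter stands in for real current-meter hardware.
import time

from openhtf import plugs
from openhtf.core.monitors import monitors
from openhtf.util import units


class CurrentMeter(plugs.BasePlug):
    """Fake plug that returns a constant current reading."""

    def get_reading(self):
        return 1.25  # amps


@plugs.plug(current_meter=CurrentMeter)
def current_monitor(test, current_meter):
    # The return value is appended as one sample, keyed by the number of
    # milliseconds elapsed since the monitored phase started.
    return current_meter.get_reading()


@monitors('current_draw', current_monitor,
          units=units.AMPERE, poll_interval_ms=500)
def my_phase(test):
    time.sleep(2)  # roughly four samples land in 'current_draw' meanwhile.
```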
python-trio__trio-727 | Documentation should have previous and next buttons on top as well as bottom.
I have been going through the trio documentation and noticed that the pages are really long: if someone just wants to move from one page to the previous or next one, they have to scroll all the way to the bottom of the page before they can click the next or previous button.
It would be nice to have previous and next buttons at the top as well as at the bottom, so that anyone can navigate through the documentation more easily.
| [
{
"content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n#\n# Trio documentation build configuration file, created by\n# sphinx-quickstart on Sat Jan 21 19:11:14 2017.\n#\n# This file is execfile()d with the current directory set to its\n# containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\nimport os\nimport sys\n# For our local_customization module\nsys.path.insert(0, os.path.abspath('.'))\n# For trio itself\nsys.path.insert(0, os.path.abspath('../..'))\n\n# Warn about all references to unknown targets\nnitpicky = True\n# Except for these ones, which we expect to point to unknown targets:\nnitpick_ignore = [\n (\"py:class\", \"CapacityLimiter-like object\"),\n (\"py:class\", \"bytes-like\"),\n (\"py:class\", \"None\"),\n # Was removed but still shows up in changelog\n (\"py:class\", \"trio.hazmat.RunLocal\"),\n # trio.abc is documented at random places scattered throughout the docs\n (\"py:mod\", \"trio.abc\"),\n (\"py:class\", \"math.inf\"),\n]\nautodoc_inherit_docstrings = False\n\n# XX hack the RTD theme until\n# https://github.com/rtfd/sphinx_rtd_theme/pull/382\n# is shipped (should be in the release after 0.2.4)\n# ...note that this has since grown to contain a bunch of other CSS hacks too\n# though.\ndef setup(app):\n app.add_stylesheet(\"hackrtd.css\")\n\n# -- General configuration ------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#\n# needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n 'sphinx.ext.autodoc',\n 'sphinx.ext.intersphinx',\n 'sphinx.ext.coverage',\n 'sphinx.ext.napoleon',\n 'sphinxcontrib_trio',\n 'local_customization',\n]\n\nintersphinx_mapping = {\n \"python\": ('https://docs.python.org/3', None),\n \"outcome\": ('https://outcome.readthedocs.io/en/latest/', None),\n}\n\nautodoc_member_order = \"bysource\"\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n#\n# source_suffix = ['.rst', '.md']\nsource_suffix = '.rst'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = 'Trio'\ncopyright = '2017, Nathaniel J. Smith'\nauthor = 'Nathaniel J. Smith'\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\nimport trio\nversion = trio.__version__\n# The full version, including alpha/beta/rc tags.\nrelease = version\n\n# It would be nicer to make this a .png; literally every browser that\n# supports favicons at all now supports png:\n# https://caniuse.com/#feat=link-icon-png\n# But sphinx won't let me:\n# https://github.com/sphinx-doc/sphinx/pull/3715\n# Oh well. 'convert favicon-32.png favicon-32.ico' it is. 
And it's only 2x\n# bigger...\nhtml_favicon = \"_static/favicon-32.ico\"\nhtml_logo = \"../../logo/wordmark-transparent.svg\"\n# & down below in html_theme_options we set logo_only=True\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = None\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This patterns also effect to html_static_path and html_extra_path\nexclude_patterns = []\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'default'\n\nhighlight_language = 'python3'\n\n# If true, `todo` and `todoList` produce output, else they produce nothing.\ntodo_include_todos = False\n\n\n# -- Options for HTML output ----------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\n#html_theme = 'alabaster'\n\n# We have to set this ourselves, not only because it's useful for local\n# testing, but also because if we don't then RTD will throw away our\n# html_theme_options.\nimport sphinx_rtd_theme\nhtml_theme = 'sphinx_rtd_theme'\nhtml_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#\nhtml_theme_options = {\n # default is 2\n # show deeper nesting in the RTD theme's sidebar TOC\n # https://stackoverflow.com/questions/27669376/\n # I'm not 100% sure this actually does anything with our current\n # versions/settings...\n \"navigation_depth\": 4,\n \"logo_only\": True,\n}\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\n\n# -- Options for HTMLHelp output ------------------------------------------\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'Triodoc'\n\n\n# -- Options for LaTeX output ---------------------------------------------\n\nlatex_elements = {\n # The paper size ('letterpaper' or 'a4paper').\n #\n # 'papersize': 'letterpaper',\n\n # The font size ('10pt', '11pt' or '12pt').\n #\n # 'pointsize': '10pt',\n\n # Additional stuff for the LaTeX preamble.\n #\n # 'preamble': '',\n\n # Latex figure (float) alignment\n #\n # 'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n (master_doc, 'Trio.tex', 'Trio Documentation',\n 'Nathaniel J. Smith', 'manual'),\n]\n\n\n# -- Options for manual page output ---------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n (master_doc, 'trio', 'Trio Documentation',\n [author], 1)\n]\n\n\n# -- Options for Texinfo output -------------------------------------------\n\n# Grouping the document tree into Texinfo files. 
List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (master_doc, 'Trio', 'Trio Documentation',\n author, 'Trio', 'One line description of project.',\n 'Miscellaneous'),\n]\n",
"path": "docs/source/conf.py"
}
] | [
{
"content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n#\n# Trio documentation build configuration file, created by\n# sphinx-quickstart on Sat Jan 21 19:11:14 2017.\n#\n# This file is execfile()d with the current directory set to its\n# containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\nimport os\nimport sys\n# For our local_customization module\nsys.path.insert(0, os.path.abspath('.'))\n# For trio itself\nsys.path.insert(0, os.path.abspath('../..'))\n\n# Warn about all references to unknown targets\nnitpicky = True\n# Except for these ones, which we expect to point to unknown targets:\nnitpick_ignore = [\n (\"py:class\", \"CapacityLimiter-like object\"),\n (\"py:class\", \"bytes-like\"),\n (\"py:class\", \"None\"),\n # Was removed but still shows up in changelog\n (\"py:class\", \"trio.hazmat.RunLocal\"),\n # trio.abc is documented at random places scattered throughout the docs\n (\"py:mod\", \"trio.abc\"),\n (\"py:class\", \"math.inf\"),\n]\nautodoc_inherit_docstrings = False\n\n# XX hack the RTD theme until\n# https://github.com/rtfd/sphinx_rtd_theme/pull/382\n# is shipped (should be in the release after 0.2.4)\n# ...note that this has since grown to contain a bunch of other CSS hacks too\n# though.\ndef setup(app):\n app.add_stylesheet(\"hackrtd.css\")\n\n# -- General configuration ------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#\n# needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n 'sphinx.ext.autodoc',\n 'sphinx.ext.intersphinx',\n 'sphinx.ext.coverage',\n 'sphinx.ext.napoleon',\n 'sphinxcontrib_trio',\n 'local_customization',\n]\n\nintersphinx_mapping = {\n \"python\": ('https://docs.python.org/3', None),\n \"outcome\": ('https://outcome.readthedocs.io/en/latest/', None),\n}\n\nautodoc_member_order = \"bysource\"\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n#\n# source_suffix = ['.rst', '.md']\nsource_suffix = '.rst'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = 'Trio'\ncopyright = '2017, Nathaniel J. Smith'\nauthor = 'Nathaniel J. Smith'\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\nimport trio\nversion = trio.__version__\n# The full version, including alpha/beta/rc tags.\nrelease = version\n\n# It would be nicer to make this a .png; literally every browser that\n# supports favicons at all now supports png:\n# https://caniuse.com/#feat=link-icon-png\n# But sphinx won't let me:\n# https://github.com/sphinx-doc/sphinx/pull/3715\n# Oh well. 'convert favicon-32.png favicon-32.ico' it is. 
And it's only 2x\n# bigger...\nhtml_favicon = \"_static/favicon-32.ico\"\nhtml_logo = \"../../logo/wordmark-transparent.svg\"\n# & down below in html_theme_options we set logo_only=True\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = None\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This patterns also effect to html_static_path and html_extra_path\nexclude_patterns = []\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'default'\n\nhighlight_language = 'python3'\n\n# If true, `todo` and `todoList` produce output, else they produce nothing.\ntodo_include_todos = False\n\n\n# -- Options for HTML output ----------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\n#html_theme = 'alabaster'\n\n# We have to set this ourselves, not only because it's useful for local\n# testing, but also because if we don't then RTD will throw away our\n# html_theme_options.\nimport sphinx_rtd_theme\nhtml_theme = 'sphinx_rtd_theme'\nhtml_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#\nhtml_theme_options = {\n # default is 2\n # show deeper nesting in the RTD theme's sidebar TOC\n # https://stackoverflow.com/questions/27669376/\n # I'm not 100% sure this actually does anything with our current\n # versions/settings...\n \"navigation_depth\": 4,\n \"logo_only\": True,\n 'prev_next_buttons_location': 'both'\n}\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\n\n# -- Options for HTMLHelp output ------------------------------------------\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'Triodoc'\n\n\n# -- Options for LaTeX output ---------------------------------------------\n\nlatex_elements = {\n # The paper size ('letterpaper' or 'a4paper').\n #\n # 'papersize': 'letterpaper',\n\n # The font size ('10pt', '11pt' or '12pt').\n #\n # 'pointsize': '10pt',\n\n # Additional stuff for the LaTeX preamble.\n #\n # 'preamble': '',\n\n # Latex figure (float) alignment\n #\n # 'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n (master_doc, 'Trio.tex', 'Trio Documentation',\n 'Nathaniel J. Smith', 'manual'),\n]\n\n\n# -- Options for manual page output ---------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n (master_doc, 'trio', 'Trio Documentation',\n [author], 1)\n]\n\n\n# -- Options for Texinfo output -------------------------------------------\n\n# Grouping the document tree into Texinfo files. 
List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (master_doc, 'Trio', 'Trio Documentation',\n author, 'Trio', 'One line description of project.',\n 'Miscellaneous'),\n]\n",
"path": "docs/source/conf.py"
}
] | diff --git a/docs/source/conf.py b/docs/source/conf.py
index 9d5890b49..bfcec9ad7 100644
--- a/docs/source/conf.py
+++ b/docs/source/conf.py
@@ -157,6 +157,7 @@ def setup(app):
# versions/settings...
"navigation_depth": 4,
"logo_only": True,
+ 'prev_next_buttons_location': 'both'
}
# Add any paths that contain custom static files (such as style sheets) here,
|
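The one-line fix above works because sphinx_rtd_theme already supports a `prev_next_buttons_location` option, which accepts `'bottom'` (the default), `'top'`, `'both'`, or `None`. After the patch, the relevant part of `docs/source/conf.py` reads roughly as below; only the last key is new, the other options were already present.

```python
# docs/source/conf.py (excerpt) -- RTD theme options after the fix
html_theme_options = {
    # show deeper nesting in the RTD theme's sidebar TOC
    "navigation_depth": 4,
    # show just the logo in the sidebar, not the project name
    "logo_only": True,
    # render previous/next buttons at both the top and bottom of each page
    'prev_next_buttons_location': 'both',
}
```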
coala__coala-bears-2811 | HgCommitBear: Make asciinema
| [
{
"content": "import os\nimport shutil\n\nfrom bears.vcs.CommitBear import _CommitBear\nfrom coala_utils.ContextManagers import change_directory\nfrom coalib.misc.Shell import run_shell_command\n\n\nclass HgCommitBear(_CommitBear):\n LANGUAGES = {'Hg'}\n CAN_DETECT = {'Formatting'}\n\n @classmethod\n def check_prerequisites(cls):\n if shutil.which('hg') is None:\n return 'hg is not installed.'\n else:\n return True\n\n def get_remotes():\n remotes, _ = run_shell_command('hg paths')\n return remotes\n\n def get_head_commit(self):\n with change_directory(self.get_config_dir() or os.getcwd()):\n return run_shell_command('hg log -l 1 --template \"{desc}\"')\n",
"path": "bears/vcs/mercurial/HgCommitBear.py"
}
] | [
{
"content": "import os\nimport shutil\n\nfrom bears.vcs.CommitBear import _CommitBear\nfrom coala_utils.ContextManagers import change_directory\nfrom coalib.misc.Shell import run_shell_command\n\n\nclass HgCommitBear(_CommitBear):\n LANGUAGES = {'Hg'}\n CAN_DETECT = {'Formatting'}\n ASCIINEMA_URL = 'https://asciinema.org/a/3Kfn2EDjYLmsbPoL7lRuLyhlN'\n\n @classmethod\n def check_prerequisites(cls):\n if shutil.which('hg') is None:\n return 'hg is not installed.'\n else:\n return True\n\n def get_remotes():\n remotes, _ = run_shell_command('hg paths')\n return remotes\n\n def get_head_commit(self):\n with change_directory(self.get_config_dir() or os.getcwd()):\n return run_shell_command('hg log -l 1 --template \"{desc}\"')\n",
"path": "bears/vcs/mercurial/HgCommitBear.py"
}
] | diff --git a/bears/vcs/mercurial/HgCommitBear.py b/bears/vcs/mercurial/HgCommitBear.py
index f2844c230f..f34237c668 100644
--- a/bears/vcs/mercurial/HgCommitBear.py
+++ b/bears/vcs/mercurial/HgCommitBear.py
@@ -9,6 +9,7 @@
class HgCommitBear(_CommitBear):
LANGUAGES = {'Hg'}
CAN_DETECT = {'Formatting'}
+ ASCIINEMA_URL = 'https://asciinema.org/a/3Kfn2EDjYLmsbPoL7lRuLyhlN'
@classmethod
def check_prerequisites(cls):
|
zulip__zulip-24971 | Show PR review content in GitHub "submitted PR review" events
Here's [an example message](https://chat.zulip.org/#narrow/stream/243-mobile-team/topic/zulip-flutter/near/1523925) from the GitHub bot:
> gnprice submitted [PR review for #25 Add personal user_id & full_name data, updating latter via events & in UI](https://github.com/zulip/zulip-flutter/pull/25#pullrequestreview-1338016589).
That's well and good as far as it goes. But that PR review had a body in it, with some text. That text is an important part of the review, so the GitHub bot should include it in its message.
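In the handler below (`zerver/webhooks/github/view.py`), `get_pull_request_review_body` never reads `payload["review"]["body"]`, even though sibling handlers such as `get_pull_request_review_comment_body` already pass comment text through as `message=`. A minimal sketch of threading the review text through the same way follows; it illustrates the idea rather than reproducing the merged fix, and it assumes the review body is nullable (GitHub sends `body: null` for reviews submitted without text).

```python
# Sketch: quote the review text in "submitted PR review" messages.
def get_pull_request_review_body(helper: Helper) -> str:
    payload = helper.payload
    include_title = helper.include_title
    title = "for #{} {}".format(
        payload["pull_request"]["number"].tame(check_int),
        payload["pull_request"]["title"].tame(check_string),
    )
    return get_pull_request_event_message(
        user_name=get_sender_name(payload),
        action="submitted",
        url=payload["review"]["html_url"].tame(check_string),
        type="PR review",
        title=title if include_title else None,
        # New: pass the (possibly null) review body so it gets rendered.
        message=payload["review"]["body"].tame(check_none_or(check_string)),
    )
```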
| [
{
"content": "import re\nfrom functools import partial\nfrom typing import Callable, Dict, Optional\n\nfrom django.http import HttpRequest, HttpResponse\n\nfrom zerver.decorator import log_unsupported_webhook_event, webhook_view\nfrom zerver.lib.exceptions import UnsupportedWebhookEventTypeError\nfrom zerver.lib.request import REQ, has_request_variables\nfrom zerver.lib.response import json_success\nfrom zerver.lib.validator import (\n WildValue,\n check_bool,\n check_int,\n check_none_or,\n check_string,\n to_wild_value,\n)\nfrom zerver.lib.webhooks.common import (\n check_send_webhook_message,\n get_http_headers_from_filename,\n get_setup_webhook_message,\n validate_extract_webhook_http_header,\n)\nfrom zerver.lib.webhooks.git import (\n CONTENT_MESSAGE_TEMPLATE,\n TOPIC_WITH_BRANCH_TEMPLATE,\n TOPIC_WITH_PR_OR_ISSUE_INFO_TEMPLATE,\n get_commits_comment_action_message,\n get_issue_event_message,\n get_pull_request_event_message,\n get_push_commits_event_message,\n get_push_tag_event_message,\n get_release_event_message,\n get_short_sha,\n)\nfrom zerver.models import UserProfile\n\nfixture_to_headers = get_http_headers_from_filename(\"HTTP_X_GITHUB_EVENT\")\n\nTOPIC_FOR_DISCUSSION = \"{repo} discussion #{number}: {title}\"\nDISCUSSION_TEMPLATE = \"{author} created [discussion #{discussion_id}]({url}) in {category}:\\n```quote\\n### {title}\\n{body}\\n```\"\nDISCUSSION_COMMENT_TEMPLATE = \"{author} [commented]({comment_url}) on [discussion #{discussion_id}]({discussion_url}):\\n```quote\\n{body}\\n```\"\n\n\nclass Helper:\n def __init__(\n self,\n payload: WildValue,\n include_title: bool,\n ) -> None:\n self.payload = payload\n self.include_title = include_title\n\n def log_unsupported(self, event: str) -> None:\n summary = f\"The '{event}' event isn't currently supported by the GitHub webhook\"\n log_unsupported_webhook_event(\n summary=summary,\n )\n\n\ndef get_opened_or_update_pull_request_body(helper: Helper) -> str:\n payload = helper.payload\n include_title = helper.include_title\n pull_request = payload[\"pull_request\"]\n action = payload[\"action\"].tame(check_string)\n if action == \"synchronize\":\n action = \"updated\"\n assignee = None\n if pull_request.get(\"assignee\"):\n assignee = pull_request[\"assignee\"][\"login\"].tame(check_string)\n description = None\n changes = payload.get(\"changes\", {})\n if \"body\" in changes or action == \"opened\":\n description = pull_request[\"body\"].tame(check_none_or(check_string))\n target_branch = None\n base_branch = None\n if action == \"opened\" or action == \"merged\":\n target_branch = pull_request[\"head\"][\"label\"].tame(check_string)\n base_branch = pull_request[\"base\"][\"label\"].tame(check_string)\n\n return get_pull_request_event_message(\n user_name=get_sender_name(payload),\n action=action,\n url=pull_request[\"html_url\"].tame(check_string),\n target_branch=target_branch,\n base_branch=base_branch,\n message=description,\n assignee=assignee,\n number=pull_request[\"number\"].tame(check_int),\n title=pull_request[\"title\"].tame(check_string) if include_title else None,\n )\n\n\ndef get_assigned_or_unassigned_pull_request_body(helper: Helper) -> str:\n payload = helper.payload\n include_title = helper.include_title\n pull_request = payload[\"pull_request\"]\n assignee = pull_request.get(\"assignee\")\n if assignee:\n stringified_assignee = assignee[\"login\"].tame(check_string)\n\n base_message = get_pull_request_event_message(\n user_name=get_sender_name(payload),\n action=payload[\"action\"].tame(check_string),\n 
url=pull_request[\"html_url\"].tame(check_string),\n number=pull_request[\"number\"].tame(check_int),\n title=pull_request[\"title\"].tame(check_string) if include_title else None,\n )\n if assignee:\n return f\"{base_message[:-1]} to {stringified_assignee}.\"\n return base_message\n\n\ndef get_closed_pull_request_body(helper: Helper) -> str:\n payload = helper.payload\n include_title = helper.include_title\n pull_request = payload[\"pull_request\"]\n action = \"merged\" if pull_request[\"merged\"].tame(check_bool) else \"closed without merge\"\n return get_pull_request_event_message(\n user_name=get_sender_name(payload),\n action=action,\n url=pull_request[\"html_url\"].tame(check_string),\n number=pull_request[\"number\"].tame(check_int),\n title=pull_request[\"title\"].tame(check_string) if include_title else None,\n )\n\n\ndef get_membership_body(helper: Helper) -> str:\n payload = helper.payload\n action = payload[\"action\"].tame(check_string)\n member = payload[\"member\"]\n team_name = payload[\"team\"][\"name\"].tame(check_string)\n\n return \"{sender} {action} [{username}]({html_url}) {preposition} the {team_name} team.\".format(\n sender=get_sender_name(payload),\n action=action,\n username=member[\"login\"].tame(check_string),\n html_url=member[\"html_url\"].tame(check_string),\n preposition=\"from\" if action == \"removed\" else \"to\",\n team_name=team_name,\n )\n\n\ndef get_member_body(helper: Helper) -> str:\n payload = helper.payload\n return \"{} {} [{}]({}) to [{}]({}).\".format(\n get_sender_name(payload),\n payload[\"action\"].tame(check_string),\n payload[\"member\"][\"login\"].tame(check_string),\n payload[\"member\"][\"html_url\"].tame(check_string),\n get_repository_name(payload),\n payload[\"repository\"][\"html_url\"].tame(check_string),\n )\n\n\ndef get_issue_body(helper: Helper) -> str:\n payload = helper.payload\n include_title = helper.include_title\n action = payload[\"action\"].tame(check_string)\n issue = payload[\"issue\"]\n assignee = issue[\"assignee\"]\n return get_issue_event_message(\n user_name=get_sender_name(payload),\n action=action,\n url=issue[\"html_url\"].tame(check_string),\n number=issue[\"number\"].tame(check_int),\n message=issue[\"body\"].tame(check_none_or(check_string)),\n assignee=assignee[\"login\"].tame(check_string) if assignee else None,\n title=issue[\"title\"].tame(check_string) if include_title else None,\n )\n\n\ndef get_issue_comment_body(helper: Helper) -> str:\n payload = helper.payload\n include_title = helper.include_title\n action = payload[\"action\"].tame(check_string)\n comment = payload[\"comment\"]\n issue = payload[\"issue\"]\n\n if action == \"created\":\n action = \"[commented]\"\n else:\n action = f\"{action} a [comment]\"\n action += \"({}) on\".format(comment[\"html_url\"].tame(check_string))\n\n return get_issue_event_message(\n user_name=get_sender_name(payload),\n action=action,\n url=issue[\"html_url\"].tame(check_string),\n number=issue[\"number\"].tame(check_int),\n message=comment[\"body\"].tame(check_string),\n title=issue[\"title\"].tame(check_string) if include_title else None,\n )\n\n\ndef get_fork_body(helper: Helper) -> str:\n payload = helper.payload\n forkee = payload[\"forkee\"]\n return \"{} forked [{}]({}).\".format(\n get_sender_name(payload),\n forkee[\"name\"].tame(check_string),\n forkee[\"html_url\"].tame(check_string),\n )\n\n\ndef get_deployment_body(helper: Helper) -> str:\n payload = helper.payload\n return f\"{get_sender_name(payload)} created new deployment.\"\n\n\ndef 
get_change_deployment_status_body(helper: Helper) -> str:\n payload = helper.payload\n return \"Deployment changed status to {}.\".format(\n payload[\"deployment_status\"][\"state\"].tame(check_string),\n )\n\n\ndef get_create_or_delete_body(helper: Helper, action: str) -> str:\n payload = helper.payload\n ref_type = payload[\"ref_type\"].tame(check_string)\n return \"{} {} {} {}.\".format(\n get_sender_name(payload),\n action,\n ref_type,\n payload[\"ref\"].tame(check_string),\n ).rstrip()\n\n\ndef get_commit_comment_body(helper: Helper) -> str:\n payload = helper.payload\n comment = payload[\"comment\"]\n comment_url = comment[\"html_url\"].tame(check_string)\n commit_url = comment_url.split(\"#\", 1)[0]\n action = f\"[commented]({comment_url})\"\n return get_commits_comment_action_message(\n get_sender_name(payload),\n action,\n commit_url,\n comment[\"commit_id\"].tame(check_string),\n comment[\"body\"].tame(check_string),\n )\n\n\ndef get_push_tags_body(helper: Helper) -> str:\n payload = helper.payload\n return get_push_tag_event_message(\n get_sender_name(payload),\n get_tag_name_from_ref(payload[\"ref\"].tame(check_string)),\n action=\"pushed\" if payload[\"created\"].tame(check_bool) else \"removed\",\n )\n\n\ndef get_push_commits_body(helper: Helper) -> str:\n payload = helper.payload\n commits_data = []\n for commit in payload[\"commits\"]:\n if commit[\"author\"].get(\"username\"):\n name = commit[\"author\"][\"username\"].tame(check_string)\n else:\n name = commit[\"author\"][\"name\"].tame(check_string)\n commits_data.append(\n {\n \"name\": name,\n \"sha\": commit[\"id\"].tame(check_string),\n \"url\": commit[\"url\"].tame(check_string),\n \"message\": commit[\"message\"].tame(check_string),\n }\n )\n return get_push_commits_event_message(\n get_sender_name(payload),\n payload[\"compare\"].tame(check_string),\n get_branch_name_from_ref(payload[\"ref\"].tame(check_string)),\n commits_data,\n deleted=payload[\"deleted\"].tame(check_bool),\n )\n\n\ndef get_discussion_body(helper: Helper) -> str:\n payload = helper.payload\n return DISCUSSION_TEMPLATE.format(\n author=get_sender_name(payload),\n url=payload[\"discussion\"][\"html_url\"].tame(check_string),\n body=payload[\"discussion\"][\"body\"].tame(check_string),\n category=payload[\"discussion\"][\"category\"][\"name\"].tame(check_string),\n discussion_id=payload[\"discussion\"][\"number\"].tame(check_int),\n title=payload[\"discussion\"][\"title\"].tame(check_string),\n )\n\n\ndef get_discussion_comment_body(helper: Helper) -> str:\n payload = helper.payload\n return DISCUSSION_COMMENT_TEMPLATE.format(\n author=get_sender_name(payload),\n body=payload[\"comment\"][\"body\"].tame(check_string),\n discussion_url=payload[\"discussion\"][\"html_url\"].tame(check_string),\n comment_url=payload[\"comment\"][\"html_url\"].tame(check_string),\n discussion_id=payload[\"discussion\"][\"number\"].tame(check_int),\n )\n\n\ndef get_public_body(helper: Helper) -> str:\n payload = helper.payload\n return \"{} made the repository [{}]({}) public.\".format(\n get_sender_name(payload),\n get_repository_full_name(payload),\n payload[\"repository\"][\"html_url\"].tame(check_string),\n )\n\n\ndef get_wiki_pages_body(helper: Helper) -> str:\n payload = helper.payload\n wiki_page_info_template = \"* {action} [{title}]({url})\\n\"\n wiki_info = \"\"\n for page in payload[\"pages\"]:\n wiki_info += wiki_page_info_template.format(\n action=page[\"action\"].tame(check_string),\n title=page[\"title\"].tame(check_string),\n 
url=page[\"html_url\"].tame(check_string),\n )\n return f\"{get_sender_name(payload)}:\\n{wiki_info.rstrip()}\"\n\n\ndef get_watch_body(helper: Helper) -> str:\n payload = helper.payload\n return \"{} starred the repository [{}]({}).\".format(\n get_sender_name(payload),\n get_repository_full_name(payload),\n payload[\"repository\"][\"html_url\"].tame(check_string),\n )\n\n\ndef get_repository_body(helper: Helper) -> str:\n payload = helper.payload\n return \"{} {} the repository [{}]({}).\".format(\n get_sender_name(payload),\n payload[\"action\"].tame(check_string),\n get_repository_full_name(payload),\n payload[\"repository\"][\"html_url\"].tame(check_string),\n )\n\n\ndef get_add_team_body(helper: Helper) -> str:\n payload = helper.payload\n return \"The repository [{}]({}) was added to team {}.\".format(\n get_repository_full_name(payload),\n payload[\"repository\"][\"html_url\"].tame(check_string),\n payload[\"team\"][\"name\"].tame(check_string),\n )\n\n\ndef get_team_body(helper: Helper) -> str:\n payload = helper.payload\n changes = payload[\"changes\"]\n if \"description\" in changes:\n actor = payload[\"sender\"][\"login\"].tame(check_string)\n new_description = payload[\"team\"][\"description\"].tame(check_string)\n return f\"**{actor}** changed the team description to:\\n```quote\\n{new_description}\\n```\"\n if \"name\" in changes:\n original_name = changes[\"name\"][\"from\"].tame(check_string)\n new_name = payload[\"team\"][\"name\"].tame(check_string)\n return f\"Team `{original_name}` was renamed to `{new_name}`.\"\n if \"privacy\" in changes:\n new_visibility = payload[\"team\"][\"privacy\"].tame(check_string)\n return f\"Team visibility changed to `{new_visibility}`\"\n\n missing_keys = \"/\".join(sorted(changes.keys()))\n helper.log_unsupported(f\"team/edited (changes: {missing_keys})\")\n\n # Do our best to give useful info to the customer--at least\n # if they know something changed, they can go to GitHub for\n # more details. 
And if it's just spam, you can control that\n # from GitHub.\n return f\"Team has changes to `{missing_keys}` data.\"\n\n\ndef get_release_body(helper: Helper) -> str:\n payload = helper.payload\n if payload[\"release\"][\"name\"]:\n release_name = payload[\"release\"][\"name\"].tame(check_string)\n else:\n release_name = payload[\"release\"][\"tag_name\"].tame(check_string)\n data = {\n \"user_name\": get_sender_name(payload),\n \"action\": payload[\"action\"].tame(check_string),\n \"tagname\": payload[\"release\"][\"tag_name\"].tame(check_string),\n # Not every GitHub release has a \"name\" set; if not there, use the tag name.\n \"release_name\": release_name,\n \"url\": payload[\"release\"][\"html_url\"].tame(check_string),\n }\n\n return get_release_event_message(**data)\n\n\ndef get_page_build_body(helper: Helper) -> str:\n payload = helper.payload\n build = payload[\"build\"]\n status = build[\"status\"].tame(check_string)\n actions = {\n \"null\": \"has yet to be built\",\n \"building\": \"is being built\",\n \"errored\": \"has failed: {}\",\n \"built\": \"has finished building\",\n }\n\n action = actions.get(status, f\"is {status}\")\n if build[\"error\"][\"message\"]:\n action = action.format(\n CONTENT_MESSAGE_TEMPLATE.format(message=build[\"error\"][\"message\"].tame(check_string)),\n )\n\n return \"GitHub Pages build, triggered by {}, {}.\".format(\n payload[\"build\"][\"pusher\"][\"login\"].tame(check_string),\n action,\n )\n\n\ndef get_status_body(helper: Helper) -> str:\n payload = helper.payload\n if payload[\"target_url\"]:\n status = \"[{}]({})\".format(\n payload[\"state\"].tame(check_string),\n payload[\"target_url\"].tame(check_string),\n )\n else:\n status = payload[\"state\"].tame(check_string)\n return \"[{}]({}) changed its status to {}.\".format(\n get_short_sha(payload[\"sha\"].tame(check_string)),\n payload[\"commit\"][\"html_url\"].tame(check_string),\n status,\n )\n\n\ndef get_locked_or_unlocked_pull_request_body(helper: Helper) -> str:\n payload = helper.payload\n\n action = payload[\"action\"].tame(check_string)\n\n message = \"{sender} has locked [PR #{pr_number}]({pr_url}) as {reason} and limited conversation to collaborators.\"\n if action == \"unlocked\":\n message = \"{sender} has unlocked [PR #{pr_number}]({pr_url}).\"\n if payload[\"pull_request\"][\"active_lock_reason\"]:\n active_lock_reason = payload[\"pull_request\"][\"active_lock_reason\"].tame(check_string)\n else:\n active_lock_reason = None\n return message.format(\n sender=get_sender_name(payload),\n pr_number=payload[\"pull_request\"][\"number\"].tame(check_int),\n pr_url=payload[\"pull_request\"][\"html_url\"].tame(check_string),\n reason=active_lock_reason,\n )\n\n\ndef get_pull_request_auto_merge_body(helper: Helper) -> str:\n payload = helper.payload\n\n action = payload[\"action\"].tame(check_string)\n\n message = \"{sender} has enabled auto merge for [PR #{pr_number}]({pr_url}).\"\n if action == \"auto_merge_disabled\":\n message = \"{sender} has disabled auto merge for [PR #{pr_number}]({pr_url}).\"\n return message.format(\n sender=get_sender_name(payload),\n pr_number=payload[\"pull_request\"][\"number\"].tame(check_int),\n pr_url=payload[\"pull_request\"][\"html_url\"].tame(check_string),\n )\n\n\ndef get_pull_request_ready_for_review_body(helper: Helper) -> str:\n payload = helper.payload\n\n message = \"**{sender}** has marked [PR #{pr_number}]({pr_url}) as ready for review.\"\n return message.format(\n sender=get_sender_name(payload),\n 
pr_number=payload[\"pull_request\"][\"number\"].tame(check_int),\n pr_url=payload[\"pull_request\"][\"html_url\"].tame(check_string),\n )\n\n\ndef get_pull_request_review_body(helper: Helper) -> str:\n payload = helper.payload\n include_title = helper.include_title\n title = \"for #{} {}\".format(\n payload[\"pull_request\"][\"number\"].tame(check_int),\n payload[\"pull_request\"][\"title\"].tame(check_string),\n )\n return get_pull_request_event_message(\n user_name=get_sender_name(payload),\n action=\"submitted\",\n url=payload[\"review\"][\"html_url\"].tame(check_string),\n type=\"PR review\",\n title=title if include_title else None,\n )\n\n\ndef get_pull_request_review_comment_body(helper: Helper) -> str:\n payload = helper.payload\n include_title = helper.include_title\n action = payload[\"action\"].tame(check_string)\n message = None\n if action == \"created\":\n message = payload[\"comment\"][\"body\"].tame(check_string)\n\n title = \"on #{} {}\".format(\n payload[\"pull_request\"][\"number\"].tame(check_int),\n payload[\"pull_request\"][\"title\"].tame(check_string),\n )\n\n return get_pull_request_event_message(\n user_name=get_sender_name(payload),\n action=action,\n url=payload[\"comment\"][\"html_url\"].tame(check_string),\n message=message,\n type=\"PR review comment\",\n title=title if include_title else None,\n )\n\n\ndef get_pull_request_review_requested_body(helper: Helper) -> str:\n payload = helper.payload\n include_title = helper.include_title\n requested_reviewer = [payload[\"requested_reviewer\"]] if \"requested_reviewer\" in payload else []\n\n requested_team = [payload[\"requested_team\"]] if \"requested_team\" in payload else []\n\n sender = get_sender_name(payload)\n pr_number = payload[\"pull_request\"][\"number\"].tame(check_int)\n pr_url = payload[\"pull_request\"][\"html_url\"].tame(check_string)\n message = \"**{sender}** requested {reviewers} for a review on [PR #{pr_number}]({pr_url}).\"\n message_with_title = (\n \"**{sender}** requested {reviewers} for a review on [PR #{pr_number} {title}]({pr_url}).\"\n )\n body = message_with_title if include_title else message\n\n all_reviewers = []\n\n for reviewer in requested_reviewer:\n all_reviewers.append(\n \"[{login}]({html_url})\".format(\n login=reviewer[\"login\"].tame(check_string),\n html_url=reviewer[\"html_url\"].tame(check_string),\n )\n )\n\n for team_reviewer in requested_team:\n all_reviewers.append(\n \"[{name}]({html_url})\".format(\n name=team_reviewer[\"name\"].tame(check_string),\n html_url=team_reviewer[\"html_url\"].tame(check_string),\n )\n )\n\n reviewers = \"\"\n reviewers = all_reviewers[0]\n\n return body.format(\n sender=sender,\n reviewers=reviewers,\n pr_number=pr_number,\n pr_url=pr_url,\n title=payload[\"pull_request\"][\"title\"].tame(check_string) if include_title else None,\n )\n\n\ndef get_check_run_body(helper: Helper) -> str:\n payload = helper.payload\n template = \"\"\"\nCheck [{name}]({html_url}) {status} ({conclusion}). 
([{short_hash}]({commit_url}))\n\"\"\".strip()\n\n kwargs = {\n \"name\": payload[\"check_run\"][\"name\"].tame(check_string),\n \"html_url\": payload[\"check_run\"][\"html_url\"].tame(check_string),\n \"status\": payload[\"check_run\"][\"status\"].tame(check_string),\n \"short_hash\": get_short_sha(payload[\"check_run\"][\"head_sha\"].tame(check_string)),\n \"commit_url\": \"{}/commit/{}\".format(\n payload[\"repository\"][\"html_url\"].tame(check_string),\n payload[\"check_run\"][\"head_sha\"].tame(check_string),\n ),\n \"conclusion\": payload[\"check_run\"][\"conclusion\"].tame(check_string),\n }\n\n return template.format(**kwargs)\n\n\ndef get_star_body(helper: Helper) -> str:\n payload = helper.payload\n template = \"{user} {action} the repository [{repo}]({url}).\"\n return template.format(\n user=payload[\"sender\"][\"login\"].tame(check_string),\n action=\"starred\" if payload[\"action\"].tame(check_string) == \"created\" else \"unstarred\",\n repo=get_repository_full_name(payload),\n url=payload[\"repository\"][\"html_url\"].tame(check_string),\n )\n\n\ndef get_ping_body(helper: Helper) -> str:\n payload = helper.payload\n return get_setup_webhook_message(\"GitHub\", get_sender_name(payload))\n\n\ndef get_repository_name(payload: WildValue) -> str:\n return payload[\"repository\"][\"name\"].tame(check_string)\n\n\ndef get_repository_full_name(payload: WildValue) -> str:\n return payload[\"repository\"][\"full_name\"].tame(check_string)\n\n\ndef get_organization_name(payload: WildValue) -> str:\n return payload[\"organization\"][\"login\"].tame(check_string)\n\n\ndef get_sender_name(payload: WildValue) -> str:\n return payload[\"sender\"][\"login\"].tame(check_string)\n\n\ndef get_branch_name_from_ref(ref_string: str) -> str:\n return re.sub(r\"^refs/heads/\", \"\", ref_string)\n\n\ndef get_tag_name_from_ref(ref_string: str) -> str:\n return re.sub(r\"^refs/tags/\", \"\", ref_string)\n\n\ndef is_commit_push_event(payload: WildValue) -> bool:\n return bool(re.match(r\"^refs/heads/\", payload[\"ref\"].tame(check_string)))\n\n\ndef get_subject_based_on_type(payload: WildValue, event: str) -> str:\n if \"pull_request\" in event:\n return TOPIC_WITH_PR_OR_ISSUE_INFO_TEMPLATE.format(\n repo=get_repository_name(payload),\n type=\"PR\",\n id=payload[\"pull_request\"][\"number\"].tame(check_int),\n title=payload[\"pull_request\"][\"title\"].tame(check_string),\n )\n elif event.startswith(\"issue\"):\n return TOPIC_WITH_PR_OR_ISSUE_INFO_TEMPLATE.format(\n repo=get_repository_name(payload),\n type=\"issue\",\n id=payload[\"issue\"][\"number\"].tame(check_int),\n title=payload[\"issue\"][\"title\"].tame(check_string),\n )\n elif event.startswith(\"deployment\"):\n return \"{} / Deployment on {}\".format(\n get_repository_name(payload),\n payload[\"deployment\"][\"environment\"].tame(check_string),\n )\n elif event == \"membership\":\n return \"{} organization\".format(payload[\"organization\"][\"login\"].tame(check_string))\n elif event == \"team\":\n return \"team {}\".format(payload[\"team\"][\"name\"].tame(check_string))\n elif event == \"push_commits\":\n return TOPIC_WITH_BRANCH_TEMPLATE.format(\n repo=get_repository_name(payload),\n branch=get_branch_name_from_ref(payload[\"ref\"].tame(check_string)),\n )\n elif event == \"gollum\":\n return TOPIC_WITH_BRANCH_TEMPLATE.format(\n repo=get_repository_name(payload),\n branch=\"wiki pages\",\n )\n elif event == \"ping\":\n if not payload.get(\"repository\"):\n return get_organization_name(payload)\n elif event == \"check_run\":\n return 
f\"{get_repository_name(payload)} / checks\"\n elif event.startswith(\"discussion\"):\n return TOPIC_FOR_DISCUSSION.format(\n repo=get_repository_name(payload),\n number=payload[\"discussion\"][\"number\"].tame(check_int),\n title=payload[\"discussion\"][\"title\"].tame(check_string),\n )\n\n return get_repository_name(payload)\n\n\nEVENT_FUNCTION_MAPPER: Dict[str, Callable[[Helper], str]] = {\n \"commit_comment\": get_commit_comment_body,\n \"closed_pull_request\": get_closed_pull_request_body,\n \"create\": partial(get_create_or_delete_body, action=\"created\"),\n \"check_run\": get_check_run_body,\n \"delete\": partial(get_create_or_delete_body, action=\"deleted\"),\n \"deployment\": get_deployment_body,\n \"deployment_status\": get_change_deployment_status_body,\n \"discussion\": get_discussion_body,\n \"discussion_comment\": get_discussion_comment_body,\n \"fork\": get_fork_body,\n \"gollum\": get_wiki_pages_body,\n \"issue_comment\": get_issue_comment_body,\n \"issues\": get_issue_body,\n \"member\": get_member_body,\n \"membership\": get_membership_body,\n \"opened_or_update_pull_request\": get_opened_or_update_pull_request_body,\n \"assigned_or_unassigned_pull_request\": get_assigned_or_unassigned_pull_request_body,\n \"page_build\": get_page_build_body,\n \"ping\": get_ping_body,\n \"public\": get_public_body,\n \"pull_request_ready_for_review\": get_pull_request_ready_for_review_body,\n \"pull_request_review\": get_pull_request_review_body,\n \"pull_request_review_comment\": get_pull_request_review_comment_body,\n \"pull_request_review_requested\": get_pull_request_review_requested_body,\n \"pull_request_auto_merge\": get_pull_request_auto_merge_body,\n \"locked_or_unlocked_pull_request\": get_locked_or_unlocked_pull_request_body,\n \"push_commits\": get_push_commits_body,\n \"push_tags\": get_push_tags_body,\n \"release\": get_release_body,\n \"repository\": get_repository_body,\n \"star\": get_star_body,\n \"status\": get_status_body,\n \"team\": get_team_body,\n \"team_add\": get_add_team_body,\n \"watch\": get_watch_body,\n}\n\nIGNORED_EVENTS = [\n \"check_suite\",\n \"label\",\n \"meta\",\n \"milestone\",\n \"organization\",\n \"project_card\",\n \"repository_vulnerability_alert\",\n]\n\nIGNORED_PULL_REQUEST_ACTIONS = [\n \"approved\",\n \"converted_to_draft\",\n \"labeled\",\n \"review_request_removed\",\n \"unlabeled\",\n]\n\nIGNORED_TEAM_ACTIONS = [\n # These are actions that are well documented by github\n # (https://docs.github.com/en/developers/webhooks-and-events/webhook-events-and-payloads)\n # but we ignore them for now, possibly just due to laziness.\n # One curious example here is team/added_to_repository, which is\n # possibly the same as team_add.\n \"added_to_repository\",\n \"created\",\n \"deleted\",\n \"removed_from_repository\",\n]\n\nALL_EVENT_TYPES = list(EVENT_FUNCTION_MAPPER.keys())\n\n\n@webhook_view(\"GitHub\", notify_bot_owner_on_invalid_json=True, all_event_types=ALL_EVENT_TYPES)\n@has_request_variables\ndef api_github_webhook(\n request: HttpRequest,\n user_profile: UserProfile,\n payload: WildValue = REQ(argument_type=\"body\", converter=to_wild_value),\n branches: Optional[str] = REQ(default=None),\n user_specified_topic: Optional[str] = REQ(\"topic\", default=None),\n) -> HttpResponse:\n \"\"\"\n GitHub sends the event as an HTTP header. 
We have our\n own Zulip-specific concept of an event that often maps\n directly to the X-GitHub-Event header's event, but we sometimes\n refine it based on the payload.\n \"\"\"\n header_event = validate_extract_webhook_http_header(request, \"X-GitHub-Event\", \"GitHub\")\n if header_event is None:\n raise UnsupportedWebhookEventTypeError(\"no header provided\")\n\n event = get_zulip_event_name(header_event, payload, branches)\n if event is None:\n # This is nothing to worry about--get_event() returns None\n # for events that are valid but not yet handled by us.\n # See IGNORED_EVENTS, for example.\n return json_success(request)\n subject = get_subject_based_on_type(payload, event)\n\n body_function = EVENT_FUNCTION_MAPPER[event]\n\n helper = Helper(\n payload=payload,\n include_title=user_specified_topic is not None,\n )\n body = body_function(helper)\n\n check_send_webhook_message(request, user_profile, subject, body, event)\n return json_success(request)\n\n\ndef get_zulip_event_name(\n header_event: str,\n payload: WildValue,\n branches: Optional[str],\n) -> Optional[str]:\n \"\"\"\n Usually, we return an event name that is a key in EVENT_FUNCTION_MAPPER.\n\n We return None for an event that we know we don't want to handle.\n \"\"\"\n if header_event == \"pull_request\":\n action = payload[\"action\"].tame(check_string)\n if action in (\"opened\", \"synchronize\", \"reopened\", \"edited\"):\n return \"opened_or_update_pull_request\"\n if action in (\"assigned\", \"unassigned\"):\n return \"assigned_or_unassigned_pull_request\"\n if action == \"closed\":\n return \"closed_pull_request\"\n if action == \"review_requested\":\n return \"pull_request_review_requested\"\n if action == \"ready_for_review\":\n return \"pull_request_ready_for_review\"\n if action in (\"locked\", \"unlocked\"):\n return \"locked_or_unlocked_pull_request\"\n if action in (\"auto_merge_enabled\", \"auto_merge_disabled\"):\n return \"pull_request_auto_merge\"\n if action in IGNORED_PULL_REQUEST_ACTIONS:\n return None\n elif header_event == \"push\":\n if is_commit_push_event(payload):\n if branches is not None:\n branch = get_branch_name_from_ref(payload[\"ref\"].tame(check_string))\n if branches.find(branch) == -1:\n return None\n return \"push_commits\"\n else:\n return \"push_tags\"\n elif header_event == \"check_run\":\n if payload[\"check_run\"][\"status\"].tame(check_string) != \"completed\":\n return None\n return header_event\n elif header_event == \"team\":\n action = payload[\"action\"].tame(check_string)\n if action == \"edited\":\n return \"team\"\n if action in IGNORED_TEAM_ACTIONS:\n # no need to spam our logs, we just haven't implemented it yet\n return None\n else:\n # this means GH has actually added new actions since September 2020,\n # so it's a bit more cause for alarm\n raise UnsupportedWebhookEventTypeError(f\"unsupported team action {action}\")\n elif header_event in list(EVENT_FUNCTION_MAPPER.keys()):\n return header_event\n elif header_event in IGNORED_EVENTS:\n return None\n\n complete_event = \"{}:{}\".format(\n header_event, payload.get(\"action\", \"???\").tame(check_string)\n ) # nocoverage\n raise UnsupportedWebhookEventTypeError(complete_event)\n",
"path": "zerver/webhooks/github/view.py"
}
] | [
{
"content": "import re\nfrom functools import partial\nfrom typing import Callable, Dict, Optional\n\nfrom django.http import HttpRequest, HttpResponse\n\nfrom zerver.decorator import log_unsupported_webhook_event, webhook_view\nfrom zerver.lib.exceptions import UnsupportedWebhookEventTypeError\nfrom zerver.lib.request import REQ, has_request_variables\nfrom zerver.lib.response import json_success\nfrom zerver.lib.validator import (\n WildValue,\n check_bool,\n check_int,\n check_none_or,\n check_string,\n to_wild_value,\n)\nfrom zerver.lib.webhooks.common import (\n check_send_webhook_message,\n get_http_headers_from_filename,\n get_setup_webhook_message,\n validate_extract_webhook_http_header,\n)\nfrom zerver.lib.webhooks.git import (\n CONTENT_MESSAGE_TEMPLATE,\n TOPIC_WITH_BRANCH_TEMPLATE,\n TOPIC_WITH_PR_OR_ISSUE_INFO_TEMPLATE,\n get_commits_comment_action_message,\n get_issue_event_message,\n get_pull_request_event_message,\n get_push_commits_event_message,\n get_push_tag_event_message,\n get_release_event_message,\n get_short_sha,\n)\nfrom zerver.models import UserProfile\n\nfixture_to_headers = get_http_headers_from_filename(\"HTTP_X_GITHUB_EVENT\")\n\nTOPIC_FOR_DISCUSSION = \"{repo} discussion #{number}: {title}\"\nDISCUSSION_TEMPLATE = \"{author} created [discussion #{discussion_id}]({url}) in {category}:\\n```quote\\n### {title}\\n{body}\\n```\"\nDISCUSSION_COMMENT_TEMPLATE = \"{author} [commented]({comment_url}) on [discussion #{discussion_id}]({discussion_url}):\\n```quote\\n{body}\\n```\"\n\n\nclass Helper:\n def __init__(\n self,\n payload: WildValue,\n include_title: bool,\n ) -> None:\n self.payload = payload\n self.include_title = include_title\n\n def log_unsupported(self, event: str) -> None:\n summary = f\"The '{event}' event isn't currently supported by the GitHub webhook\"\n log_unsupported_webhook_event(\n summary=summary,\n )\n\n\ndef get_opened_or_update_pull_request_body(helper: Helper) -> str:\n payload = helper.payload\n include_title = helper.include_title\n pull_request = payload[\"pull_request\"]\n action = payload[\"action\"].tame(check_string)\n if action == \"synchronize\":\n action = \"updated\"\n assignee = None\n if pull_request.get(\"assignee\"):\n assignee = pull_request[\"assignee\"][\"login\"].tame(check_string)\n description = None\n changes = payload.get(\"changes\", {})\n if \"body\" in changes or action == \"opened\":\n description = pull_request[\"body\"].tame(check_none_or(check_string))\n target_branch = None\n base_branch = None\n if action == \"opened\" or action == \"merged\":\n target_branch = pull_request[\"head\"][\"label\"].tame(check_string)\n base_branch = pull_request[\"base\"][\"label\"].tame(check_string)\n\n return get_pull_request_event_message(\n user_name=get_sender_name(payload),\n action=action,\n url=pull_request[\"html_url\"].tame(check_string),\n target_branch=target_branch,\n base_branch=base_branch,\n message=description,\n assignee=assignee,\n number=pull_request[\"number\"].tame(check_int),\n title=pull_request[\"title\"].tame(check_string) if include_title else None,\n )\n\n\ndef get_assigned_or_unassigned_pull_request_body(helper: Helper) -> str:\n payload = helper.payload\n include_title = helper.include_title\n pull_request = payload[\"pull_request\"]\n assignee = pull_request.get(\"assignee\")\n if assignee:\n stringified_assignee = assignee[\"login\"].tame(check_string)\n\n base_message = get_pull_request_event_message(\n user_name=get_sender_name(payload),\n action=payload[\"action\"].tame(check_string),\n 
url=pull_request[\"html_url\"].tame(check_string),\n number=pull_request[\"number\"].tame(check_int),\n title=pull_request[\"title\"].tame(check_string) if include_title else None,\n )\n if assignee:\n return f\"{base_message[:-1]} to {stringified_assignee}.\"\n return base_message\n\n\ndef get_closed_pull_request_body(helper: Helper) -> str:\n payload = helper.payload\n include_title = helper.include_title\n pull_request = payload[\"pull_request\"]\n action = \"merged\" if pull_request[\"merged\"].tame(check_bool) else \"closed without merge\"\n return get_pull_request_event_message(\n user_name=get_sender_name(payload),\n action=action,\n url=pull_request[\"html_url\"].tame(check_string),\n number=pull_request[\"number\"].tame(check_int),\n title=pull_request[\"title\"].tame(check_string) if include_title else None,\n )\n\n\ndef get_membership_body(helper: Helper) -> str:\n payload = helper.payload\n action = payload[\"action\"].tame(check_string)\n member = payload[\"member\"]\n team_name = payload[\"team\"][\"name\"].tame(check_string)\n\n return \"{sender} {action} [{username}]({html_url}) {preposition} the {team_name} team.\".format(\n sender=get_sender_name(payload),\n action=action,\n username=member[\"login\"].tame(check_string),\n html_url=member[\"html_url\"].tame(check_string),\n preposition=\"from\" if action == \"removed\" else \"to\",\n team_name=team_name,\n )\n\n\ndef get_member_body(helper: Helper) -> str:\n payload = helper.payload\n return \"{} {} [{}]({}) to [{}]({}).\".format(\n get_sender_name(payload),\n payload[\"action\"].tame(check_string),\n payload[\"member\"][\"login\"].tame(check_string),\n payload[\"member\"][\"html_url\"].tame(check_string),\n get_repository_name(payload),\n payload[\"repository\"][\"html_url\"].tame(check_string),\n )\n\n\ndef get_issue_body(helper: Helper) -> str:\n payload = helper.payload\n include_title = helper.include_title\n action = payload[\"action\"].tame(check_string)\n issue = payload[\"issue\"]\n assignee = issue[\"assignee\"]\n return get_issue_event_message(\n user_name=get_sender_name(payload),\n action=action,\n url=issue[\"html_url\"].tame(check_string),\n number=issue[\"number\"].tame(check_int),\n message=issue[\"body\"].tame(check_none_or(check_string)),\n assignee=assignee[\"login\"].tame(check_string) if assignee else None,\n title=issue[\"title\"].tame(check_string) if include_title else None,\n )\n\n\ndef get_issue_comment_body(helper: Helper) -> str:\n payload = helper.payload\n include_title = helper.include_title\n action = payload[\"action\"].tame(check_string)\n comment = payload[\"comment\"]\n issue = payload[\"issue\"]\n\n if action == \"created\":\n action = \"[commented]\"\n else:\n action = f\"{action} a [comment]\"\n action += \"({}) on\".format(comment[\"html_url\"].tame(check_string))\n\n return get_issue_event_message(\n user_name=get_sender_name(payload),\n action=action,\n url=issue[\"html_url\"].tame(check_string),\n number=issue[\"number\"].tame(check_int),\n message=comment[\"body\"].tame(check_string),\n title=issue[\"title\"].tame(check_string) if include_title else None,\n )\n\n\ndef get_fork_body(helper: Helper) -> str:\n payload = helper.payload\n forkee = payload[\"forkee\"]\n return \"{} forked [{}]({}).\".format(\n get_sender_name(payload),\n forkee[\"name\"].tame(check_string),\n forkee[\"html_url\"].tame(check_string),\n )\n\n\ndef get_deployment_body(helper: Helper) -> str:\n payload = helper.payload\n return f\"{get_sender_name(payload)} created new deployment.\"\n\n\ndef 
get_change_deployment_status_body(helper: Helper) -> str:\n payload = helper.payload\n return \"Deployment changed status to {}.\".format(\n payload[\"deployment_status\"][\"state\"].tame(check_string),\n )\n\n\ndef get_create_or_delete_body(helper: Helper, action: str) -> str:\n payload = helper.payload\n ref_type = payload[\"ref_type\"].tame(check_string)\n return \"{} {} {} {}.\".format(\n get_sender_name(payload),\n action,\n ref_type,\n payload[\"ref\"].tame(check_string),\n ).rstrip()\n\n\ndef get_commit_comment_body(helper: Helper) -> str:\n payload = helper.payload\n comment = payload[\"comment\"]\n comment_url = comment[\"html_url\"].tame(check_string)\n commit_url = comment_url.split(\"#\", 1)[0]\n action = f\"[commented]({comment_url})\"\n return get_commits_comment_action_message(\n get_sender_name(payload),\n action,\n commit_url,\n comment[\"commit_id\"].tame(check_string),\n comment[\"body\"].tame(check_string),\n )\n\n\ndef get_push_tags_body(helper: Helper) -> str:\n payload = helper.payload\n return get_push_tag_event_message(\n get_sender_name(payload),\n get_tag_name_from_ref(payload[\"ref\"].tame(check_string)),\n action=\"pushed\" if payload[\"created\"].tame(check_bool) else \"removed\",\n )\n\n\ndef get_push_commits_body(helper: Helper) -> str:\n payload = helper.payload\n commits_data = []\n for commit in payload[\"commits\"]:\n if commit[\"author\"].get(\"username\"):\n name = commit[\"author\"][\"username\"].tame(check_string)\n else:\n name = commit[\"author\"][\"name\"].tame(check_string)\n commits_data.append(\n {\n \"name\": name,\n \"sha\": commit[\"id\"].tame(check_string),\n \"url\": commit[\"url\"].tame(check_string),\n \"message\": commit[\"message\"].tame(check_string),\n }\n )\n return get_push_commits_event_message(\n get_sender_name(payload),\n payload[\"compare\"].tame(check_string),\n get_branch_name_from_ref(payload[\"ref\"].tame(check_string)),\n commits_data,\n deleted=payload[\"deleted\"].tame(check_bool),\n )\n\n\ndef get_discussion_body(helper: Helper) -> str:\n payload = helper.payload\n return DISCUSSION_TEMPLATE.format(\n author=get_sender_name(payload),\n url=payload[\"discussion\"][\"html_url\"].tame(check_string),\n body=payload[\"discussion\"][\"body\"].tame(check_string),\n category=payload[\"discussion\"][\"category\"][\"name\"].tame(check_string),\n discussion_id=payload[\"discussion\"][\"number\"].tame(check_int),\n title=payload[\"discussion\"][\"title\"].tame(check_string),\n )\n\n\ndef get_discussion_comment_body(helper: Helper) -> str:\n payload = helper.payload\n return DISCUSSION_COMMENT_TEMPLATE.format(\n author=get_sender_name(payload),\n body=payload[\"comment\"][\"body\"].tame(check_string),\n discussion_url=payload[\"discussion\"][\"html_url\"].tame(check_string),\n comment_url=payload[\"comment\"][\"html_url\"].tame(check_string),\n discussion_id=payload[\"discussion\"][\"number\"].tame(check_int),\n )\n\n\ndef get_public_body(helper: Helper) -> str:\n payload = helper.payload\n return \"{} made the repository [{}]({}) public.\".format(\n get_sender_name(payload),\n get_repository_full_name(payload),\n payload[\"repository\"][\"html_url\"].tame(check_string),\n )\n\n\ndef get_wiki_pages_body(helper: Helper) -> str:\n payload = helper.payload\n wiki_page_info_template = \"* {action} [{title}]({url})\\n\"\n wiki_info = \"\"\n for page in payload[\"pages\"]:\n wiki_info += wiki_page_info_template.format(\n action=page[\"action\"].tame(check_string),\n title=page[\"title\"].tame(check_string),\n 
url=page[\"html_url\"].tame(check_string),\n )\n return f\"{get_sender_name(payload)}:\\n{wiki_info.rstrip()}\"\n\n\ndef get_watch_body(helper: Helper) -> str:\n payload = helper.payload\n return \"{} starred the repository [{}]({}).\".format(\n get_sender_name(payload),\n get_repository_full_name(payload),\n payload[\"repository\"][\"html_url\"].tame(check_string),\n )\n\n\ndef get_repository_body(helper: Helper) -> str:\n payload = helper.payload\n return \"{} {} the repository [{}]({}).\".format(\n get_sender_name(payload),\n payload[\"action\"].tame(check_string),\n get_repository_full_name(payload),\n payload[\"repository\"][\"html_url\"].tame(check_string),\n )\n\n\ndef get_add_team_body(helper: Helper) -> str:\n payload = helper.payload\n return \"The repository [{}]({}) was added to team {}.\".format(\n get_repository_full_name(payload),\n payload[\"repository\"][\"html_url\"].tame(check_string),\n payload[\"team\"][\"name\"].tame(check_string),\n )\n\n\ndef get_team_body(helper: Helper) -> str:\n payload = helper.payload\n changes = payload[\"changes\"]\n if \"description\" in changes:\n actor = payload[\"sender\"][\"login\"].tame(check_string)\n new_description = payload[\"team\"][\"description\"].tame(check_string)\n return f\"**{actor}** changed the team description to:\\n```quote\\n{new_description}\\n```\"\n if \"name\" in changes:\n original_name = changes[\"name\"][\"from\"].tame(check_string)\n new_name = payload[\"team\"][\"name\"].tame(check_string)\n return f\"Team `{original_name}` was renamed to `{new_name}`.\"\n if \"privacy\" in changes:\n new_visibility = payload[\"team\"][\"privacy\"].tame(check_string)\n return f\"Team visibility changed to `{new_visibility}`\"\n\n missing_keys = \"/\".join(sorted(changes.keys()))\n helper.log_unsupported(f\"team/edited (changes: {missing_keys})\")\n\n # Do our best to give useful info to the customer--at least\n # if they know something changed, they can go to GitHub for\n # more details. 
And if it's just spam, you can control that\n # from GitHub.\n return f\"Team has changes to `{missing_keys}` data.\"\n\n\ndef get_release_body(helper: Helper) -> str:\n payload = helper.payload\n if payload[\"release\"][\"name\"]:\n release_name = payload[\"release\"][\"name\"].tame(check_string)\n else:\n release_name = payload[\"release\"][\"tag_name\"].tame(check_string)\n data = {\n \"user_name\": get_sender_name(payload),\n \"action\": payload[\"action\"].tame(check_string),\n \"tagname\": payload[\"release\"][\"tag_name\"].tame(check_string),\n # Not every GitHub release has a \"name\" set; if not there, use the tag name.\n \"release_name\": release_name,\n \"url\": payload[\"release\"][\"html_url\"].tame(check_string),\n }\n\n return get_release_event_message(**data)\n\n\ndef get_page_build_body(helper: Helper) -> str:\n payload = helper.payload\n build = payload[\"build\"]\n status = build[\"status\"].tame(check_string)\n actions = {\n \"null\": \"has yet to be built\",\n \"building\": \"is being built\",\n \"errored\": \"has failed: {}\",\n \"built\": \"has finished building\",\n }\n\n action = actions.get(status, f\"is {status}\")\n if build[\"error\"][\"message\"]:\n action = action.format(\n CONTENT_MESSAGE_TEMPLATE.format(message=build[\"error\"][\"message\"].tame(check_string)),\n )\n\n return \"GitHub Pages build, triggered by {}, {}.\".format(\n payload[\"build\"][\"pusher\"][\"login\"].tame(check_string),\n action,\n )\n\n\ndef get_status_body(helper: Helper) -> str:\n payload = helper.payload\n if payload[\"target_url\"]:\n status = \"[{}]({})\".format(\n payload[\"state\"].tame(check_string),\n payload[\"target_url\"].tame(check_string),\n )\n else:\n status = payload[\"state\"].tame(check_string)\n return \"[{}]({}) changed its status to {}.\".format(\n get_short_sha(payload[\"sha\"].tame(check_string)),\n payload[\"commit\"][\"html_url\"].tame(check_string),\n status,\n )\n\n\ndef get_locked_or_unlocked_pull_request_body(helper: Helper) -> str:\n payload = helper.payload\n\n action = payload[\"action\"].tame(check_string)\n\n message = \"{sender} has locked [PR #{pr_number}]({pr_url}) as {reason} and limited conversation to collaborators.\"\n if action == \"unlocked\":\n message = \"{sender} has unlocked [PR #{pr_number}]({pr_url}).\"\n if payload[\"pull_request\"][\"active_lock_reason\"]:\n active_lock_reason = payload[\"pull_request\"][\"active_lock_reason\"].tame(check_string)\n else:\n active_lock_reason = None\n return message.format(\n sender=get_sender_name(payload),\n pr_number=payload[\"pull_request\"][\"number\"].tame(check_int),\n pr_url=payload[\"pull_request\"][\"html_url\"].tame(check_string),\n reason=active_lock_reason,\n )\n\n\ndef get_pull_request_auto_merge_body(helper: Helper) -> str:\n payload = helper.payload\n\n action = payload[\"action\"].tame(check_string)\n\n message = \"{sender} has enabled auto merge for [PR #{pr_number}]({pr_url}).\"\n if action == \"auto_merge_disabled\":\n message = \"{sender} has disabled auto merge for [PR #{pr_number}]({pr_url}).\"\n return message.format(\n sender=get_sender_name(payload),\n pr_number=payload[\"pull_request\"][\"number\"].tame(check_int),\n pr_url=payload[\"pull_request\"][\"html_url\"].tame(check_string),\n )\n\n\ndef get_pull_request_ready_for_review_body(helper: Helper) -> str:\n payload = helper.payload\n\n message = \"**{sender}** has marked [PR #{pr_number}]({pr_url}) as ready for review.\"\n return message.format(\n sender=get_sender_name(payload),\n 
pr_number=payload[\"pull_request\"][\"number\"].tame(check_int),\n pr_url=payload[\"pull_request\"][\"html_url\"].tame(check_string),\n )\n\n\ndef get_pull_request_review_body(helper: Helper) -> str:\n payload = helper.payload\n include_title = helper.include_title\n title = \"for #{} {}\".format(\n payload[\"pull_request\"][\"number\"].tame(check_int),\n payload[\"pull_request\"][\"title\"].tame(check_string),\n )\n return get_pull_request_event_message(\n user_name=get_sender_name(payload),\n action=\"submitted\",\n url=payload[\"review\"][\"html_url\"].tame(check_string),\n type=\"PR review\",\n title=title if include_title else None,\n message=payload[\"review\"][\"body\"].tame(check_string),\n )\n\n\ndef get_pull_request_review_comment_body(helper: Helper) -> str:\n payload = helper.payload\n include_title = helper.include_title\n action = payload[\"action\"].tame(check_string)\n message = None\n if action == \"created\":\n message = payload[\"comment\"][\"body\"].tame(check_string)\n\n title = \"on #{} {}\".format(\n payload[\"pull_request\"][\"number\"].tame(check_int),\n payload[\"pull_request\"][\"title\"].tame(check_string),\n )\n\n return get_pull_request_event_message(\n user_name=get_sender_name(payload),\n action=action,\n url=payload[\"comment\"][\"html_url\"].tame(check_string),\n message=message,\n type=\"PR review comment\",\n title=title if include_title else None,\n )\n\n\ndef get_pull_request_review_requested_body(helper: Helper) -> str:\n payload = helper.payload\n include_title = helper.include_title\n requested_reviewer = [payload[\"requested_reviewer\"]] if \"requested_reviewer\" in payload else []\n\n requested_team = [payload[\"requested_team\"]] if \"requested_team\" in payload else []\n\n sender = get_sender_name(payload)\n pr_number = payload[\"pull_request\"][\"number\"].tame(check_int)\n pr_url = payload[\"pull_request\"][\"html_url\"].tame(check_string)\n message = \"**{sender}** requested {reviewers} for a review on [PR #{pr_number}]({pr_url}).\"\n message_with_title = (\n \"**{sender}** requested {reviewers} for a review on [PR #{pr_number} {title}]({pr_url}).\"\n )\n body = message_with_title if include_title else message\n\n all_reviewers = []\n\n for reviewer in requested_reviewer:\n all_reviewers.append(\n \"[{login}]({html_url})\".format(\n login=reviewer[\"login\"].tame(check_string),\n html_url=reviewer[\"html_url\"].tame(check_string),\n )\n )\n\n for team_reviewer in requested_team:\n all_reviewers.append(\n \"[{name}]({html_url})\".format(\n name=team_reviewer[\"name\"].tame(check_string),\n html_url=team_reviewer[\"html_url\"].tame(check_string),\n )\n )\n\n reviewers = \"\"\n reviewers = all_reviewers[0]\n\n return body.format(\n sender=sender,\n reviewers=reviewers,\n pr_number=pr_number,\n pr_url=pr_url,\n title=payload[\"pull_request\"][\"title\"].tame(check_string) if include_title else None,\n )\n\n\ndef get_check_run_body(helper: Helper) -> str:\n payload = helper.payload\n template = \"\"\"\nCheck [{name}]({html_url}) {status} ({conclusion}). 
([{short_hash}]({commit_url}))\n\"\"\".strip()\n\n kwargs = {\n \"name\": payload[\"check_run\"][\"name\"].tame(check_string),\n \"html_url\": payload[\"check_run\"][\"html_url\"].tame(check_string),\n \"status\": payload[\"check_run\"][\"status\"].tame(check_string),\n \"short_hash\": get_short_sha(payload[\"check_run\"][\"head_sha\"].tame(check_string)),\n \"commit_url\": \"{}/commit/{}\".format(\n payload[\"repository\"][\"html_url\"].tame(check_string),\n payload[\"check_run\"][\"head_sha\"].tame(check_string),\n ),\n \"conclusion\": payload[\"check_run\"][\"conclusion\"].tame(check_string),\n }\n\n return template.format(**kwargs)\n\n\ndef get_star_body(helper: Helper) -> str:\n payload = helper.payload\n template = \"{user} {action} the repository [{repo}]({url}).\"\n return template.format(\n user=payload[\"sender\"][\"login\"].tame(check_string),\n action=\"starred\" if payload[\"action\"].tame(check_string) == \"created\" else \"unstarred\",\n repo=get_repository_full_name(payload),\n url=payload[\"repository\"][\"html_url\"].tame(check_string),\n )\n\n\ndef get_ping_body(helper: Helper) -> str:\n payload = helper.payload\n return get_setup_webhook_message(\"GitHub\", get_sender_name(payload))\n\n\ndef get_repository_name(payload: WildValue) -> str:\n return payload[\"repository\"][\"name\"].tame(check_string)\n\n\ndef get_repository_full_name(payload: WildValue) -> str:\n return payload[\"repository\"][\"full_name\"].tame(check_string)\n\n\ndef get_organization_name(payload: WildValue) -> str:\n return payload[\"organization\"][\"login\"].tame(check_string)\n\n\ndef get_sender_name(payload: WildValue) -> str:\n return payload[\"sender\"][\"login\"].tame(check_string)\n\n\ndef get_branch_name_from_ref(ref_string: str) -> str:\n return re.sub(r\"^refs/heads/\", \"\", ref_string)\n\n\ndef get_tag_name_from_ref(ref_string: str) -> str:\n return re.sub(r\"^refs/tags/\", \"\", ref_string)\n\n\ndef is_commit_push_event(payload: WildValue) -> bool:\n return bool(re.match(r\"^refs/heads/\", payload[\"ref\"].tame(check_string)))\n\n\ndef get_subject_based_on_type(payload: WildValue, event: str) -> str:\n if \"pull_request\" in event:\n return TOPIC_WITH_PR_OR_ISSUE_INFO_TEMPLATE.format(\n repo=get_repository_name(payload),\n type=\"PR\",\n id=payload[\"pull_request\"][\"number\"].tame(check_int),\n title=payload[\"pull_request\"][\"title\"].tame(check_string),\n )\n elif event.startswith(\"issue\"):\n return TOPIC_WITH_PR_OR_ISSUE_INFO_TEMPLATE.format(\n repo=get_repository_name(payload),\n type=\"issue\",\n id=payload[\"issue\"][\"number\"].tame(check_int),\n title=payload[\"issue\"][\"title\"].tame(check_string),\n )\n elif event.startswith(\"deployment\"):\n return \"{} / Deployment on {}\".format(\n get_repository_name(payload),\n payload[\"deployment\"][\"environment\"].tame(check_string),\n )\n elif event == \"membership\":\n return \"{} organization\".format(payload[\"organization\"][\"login\"].tame(check_string))\n elif event == \"team\":\n return \"team {}\".format(payload[\"team\"][\"name\"].tame(check_string))\n elif event == \"push_commits\":\n return TOPIC_WITH_BRANCH_TEMPLATE.format(\n repo=get_repository_name(payload),\n branch=get_branch_name_from_ref(payload[\"ref\"].tame(check_string)),\n )\n elif event == \"gollum\":\n return TOPIC_WITH_BRANCH_TEMPLATE.format(\n repo=get_repository_name(payload),\n branch=\"wiki pages\",\n )\n elif event == \"ping\":\n if not payload.get(\"repository\"):\n return get_organization_name(payload)\n elif event == \"check_run\":\n return 
f\"{get_repository_name(payload)} / checks\"\n elif event.startswith(\"discussion\"):\n return TOPIC_FOR_DISCUSSION.format(\n repo=get_repository_name(payload),\n number=payload[\"discussion\"][\"number\"].tame(check_int),\n title=payload[\"discussion\"][\"title\"].tame(check_string),\n )\n\n return get_repository_name(payload)\n\n\nEVENT_FUNCTION_MAPPER: Dict[str, Callable[[Helper], str]] = {\n \"commit_comment\": get_commit_comment_body,\n \"closed_pull_request\": get_closed_pull_request_body,\n \"create\": partial(get_create_or_delete_body, action=\"created\"),\n \"check_run\": get_check_run_body,\n \"delete\": partial(get_create_or_delete_body, action=\"deleted\"),\n \"deployment\": get_deployment_body,\n \"deployment_status\": get_change_deployment_status_body,\n \"discussion\": get_discussion_body,\n \"discussion_comment\": get_discussion_comment_body,\n \"fork\": get_fork_body,\n \"gollum\": get_wiki_pages_body,\n \"issue_comment\": get_issue_comment_body,\n \"issues\": get_issue_body,\n \"member\": get_member_body,\n \"membership\": get_membership_body,\n \"opened_or_update_pull_request\": get_opened_or_update_pull_request_body,\n \"assigned_or_unassigned_pull_request\": get_assigned_or_unassigned_pull_request_body,\n \"page_build\": get_page_build_body,\n \"ping\": get_ping_body,\n \"public\": get_public_body,\n \"pull_request_ready_for_review\": get_pull_request_ready_for_review_body,\n \"pull_request_review\": get_pull_request_review_body,\n \"pull_request_review_comment\": get_pull_request_review_comment_body,\n \"pull_request_review_requested\": get_pull_request_review_requested_body,\n \"pull_request_auto_merge\": get_pull_request_auto_merge_body,\n \"locked_or_unlocked_pull_request\": get_locked_or_unlocked_pull_request_body,\n \"push_commits\": get_push_commits_body,\n \"push_tags\": get_push_tags_body,\n \"release\": get_release_body,\n \"repository\": get_repository_body,\n \"star\": get_star_body,\n \"status\": get_status_body,\n \"team\": get_team_body,\n \"team_add\": get_add_team_body,\n \"watch\": get_watch_body,\n}\n\nIGNORED_EVENTS = [\n \"check_suite\",\n \"label\",\n \"meta\",\n \"milestone\",\n \"organization\",\n \"project_card\",\n \"repository_vulnerability_alert\",\n]\n\nIGNORED_PULL_REQUEST_ACTIONS = [\n \"approved\",\n \"converted_to_draft\",\n \"labeled\",\n \"review_request_removed\",\n \"unlabeled\",\n]\n\nIGNORED_TEAM_ACTIONS = [\n # These are actions that are well documented by github\n # (https://docs.github.com/en/developers/webhooks-and-events/webhook-events-and-payloads)\n # but we ignore them for now, possibly just due to laziness.\n # One curious example here is team/added_to_repository, which is\n # possibly the same as team_add.\n \"added_to_repository\",\n \"created\",\n \"deleted\",\n \"removed_from_repository\",\n]\n\nALL_EVENT_TYPES = list(EVENT_FUNCTION_MAPPER.keys())\n\n\n@webhook_view(\"GitHub\", notify_bot_owner_on_invalid_json=True, all_event_types=ALL_EVENT_TYPES)\n@has_request_variables\ndef api_github_webhook(\n request: HttpRequest,\n user_profile: UserProfile,\n payload: WildValue = REQ(argument_type=\"body\", converter=to_wild_value),\n branches: Optional[str] = REQ(default=None),\n user_specified_topic: Optional[str] = REQ(\"topic\", default=None),\n) -> HttpResponse:\n \"\"\"\n GitHub sends the event as an HTTP header. 
We have our\n own Zulip-specific concept of an event that often maps\n directly to the X-GitHub-Event header's event, but we sometimes\n refine it based on the payload.\n \"\"\"\n header_event = validate_extract_webhook_http_header(request, \"X-GitHub-Event\", \"GitHub\")\n if header_event is None:\n raise UnsupportedWebhookEventTypeError(\"no header provided\")\n\n event = get_zulip_event_name(header_event, payload, branches)\n if event is None:\n # This is nothing to worry about--get_event() returns None\n # for events that are valid but not yet handled by us.\n # See IGNORED_EVENTS, for example.\n return json_success(request)\n subject = get_subject_based_on_type(payload, event)\n\n body_function = EVENT_FUNCTION_MAPPER[event]\n\n helper = Helper(\n payload=payload,\n include_title=user_specified_topic is not None,\n )\n body = body_function(helper)\n\n check_send_webhook_message(request, user_profile, subject, body, event)\n return json_success(request)\n\n\ndef get_zulip_event_name(\n header_event: str,\n payload: WildValue,\n branches: Optional[str],\n) -> Optional[str]:\n \"\"\"\n Usually, we return an event name that is a key in EVENT_FUNCTION_MAPPER.\n\n We return None for an event that we know we don't want to handle.\n \"\"\"\n if header_event == \"pull_request\":\n action = payload[\"action\"].tame(check_string)\n if action in (\"opened\", \"synchronize\", \"reopened\", \"edited\"):\n return \"opened_or_update_pull_request\"\n if action in (\"assigned\", \"unassigned\"):\n return \"assigned_or_unassigned_pull_request\"\n if action == \"closed\":\n return \"closed_pull_request\"\n if action == \"review_requested\":\n return \"pull_request_review_requested\"\n if action == \"ready_for_review\":\n return \"pull_request_ready_for_review\"\n if action in (\"locked\", \"unlocked\"):\n return \"locked_or_unlocked_pull_request\"\n if action in (\"auto_merge_enabled\", \"auto_merge_disabled\"):\n return \"pull_request_auto_merge\"\n if action in IGNORED_PULL_REQUEST_ACTIONS:\n return None\n elif header_event == \"push\":\n if is_commit_push_event(payload):\n if branches is not None:\n branch = get_branch_name_from_ref(payload[\"ref\"].tame(check_string))\n if branches.find(branch) == -1:\n return None\n return \"push_commits\"\n else:\n return \"push_tags\"\n elif header_event == \"check_run\":\n if payload[\"check_run\"][\"status\"].tame(check_string) != \"completed\":\n return None\n return header_event\n elif header_event == \"team\":\n action = payload[\"action\"].tame(check_string)\n if action == \"edited\":\n return \"team\"\n if action in IGNORED_TEAM_ACTIONS:\n # no need to spam our logs, we just haven't implemented it yet\n return None\n else:\n # this means GH has actually added new actions since September 2020,\n # so it's a bit more cause for alarm\n raise UnsupportedWebhookEventTypeError(f\"unsupported team action {action}\")\n elif header_event in list(EVENT_FUNCTION_MAPPER.keys()):\n return header_event\n elif header_event in IGNORED_EVENTS:\n return None\n\n complete_event = \"{}:{}\".format(\n header_event, payload.get(\"action\", \"???\").tame(check_string)\n ) # nocoverage\n raise UnsupportedWebhookEventTypeError(complete_event)\n",
"path": "zerver/webhooks/github/view.py"
}
] | diff --git a/zerver/webhooks/github/tests.py b/zerver/webhooks/github/tests.py
index 72a4a02249a8b..146c865b18586 100644
--- a/zerver/webhooks/github/tests.py
+++ b/zerver/webhooks/github/tests.py
@@ -240,13 +240,13 @@ def test_status_with_target_url_msg(self) -> None:
self.check_webhook("status__with_target_url", TOPIC_REPO, expected_message)
def test_pull_request_review_msg(self) -> None:
- expected_message = "baxterthehacker submitted [PR review](https://github.com/baxterthehacker/public-repo/pull/1#pullrequestreview-2626884)."
+ expected_message = "baxterthehacker submitted [PR review](https://github.com/baxterthehacker/public-repo/pull/1#pullrequestreview-2626884):\n\n~~~ quote\nLooks great!\n~~~"
self.check_webhook("pull_request_review", TOPIC_PR, expected_message)
def test_pull_request_review_msg_with_custom_topic_in_url(self) -> None:
self.url = self.build_webhook_url(topic="notifications")
expected_topic = "notifications"
- expected_message = "baxterthehacker submitted [PR review for #1 Update the README with new information](https://github.com/baxterthehacker/public-repo/pull/1#pullrequestreview-2626884)."
+ expected_message = "baxterthehacker submitted [PR review for #1 Update the README with new information](https://github.com/baxterthehacker/public-repo/pull/1#pullrequestreview-2626884):\n\n~~~ quote\nLooks great!\n~~~"
self.check_webhook("pull_request_review", expected_topic, expected_message)
def test_pull_request_review_comment_msg(self) -> None:
diff --git a/zerver/webhooks/github/view.py b/zerver/webhooks/github/view.py
index ea22a580ead3c..8b414adcac2a2 100644
--- a/zerver/webhooks/github/view.py
+++ b/zerver/webhooks/github/view.py
@@ -490,6 +490,7 @@ def get_pull_request_review_body(helper: Helper) -> str:
url=payload["review"]["html_url"].tame(check_string),
type="PR review",
title=title if include_title else None,
+ message=payload["review"]["body"].tame(check_string),
)
|
wright-group__WrightTools-590 | Change __version__ to match PEP 440
Specifically, when a branch is specified, the version string should be joined with a plus sign instead of a minus sign:
https://www.python.org/dev/peps/pep-0440/#local-version-identifiers
https://github.com/wright-group/WrightTools/blob/490a4a3d6fb6f016e7033d661b553b72c2d86fcb/WrightTools/__version__.py#L33
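As a quick illustration (an addition to the report, not part of it): a local version joined with `+` parses under PEP 440, while the `-` form the current code builds does not. This sketch assumes the `packaging` library is available and the branch name uses PEP 440-safe characters:
```python
# Hedged sketch: validate both forms with the `packaging` library.
from packaging.version import InvalidVersion, Version

base, branch = "3.1.0", "feature-x"  # hypothetical values

print(Version(f"{base}+{branch}"))   # 3.1.0+feature.x -- a valid local version

try:
    Version(f"{base}-{branch}")      # the form the current code produces
except InvalidVersion as exc:
    print("rejected:", exc)
```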
| [
{
"content": "\"\"\"Define WrightTools version.\"\"\"\n\n\n# --- import --------------------------------------------------------------------------------------\n\n\nimport os\n\n\n# ---- define -------------------------------------------------------------------------------------\n\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\n\n__all__ = ['__version__', '__branch__']\n\n\n# --- version -------------------------------------------------------------------------------------\n\n\n# read from VERSION file\nwith open(os.path.join(os.path.dirname(here), 'VERSION')) as f:\n __version__ = f.read().strip()\n\n\n# add git branch, if appropriate\np = os.path.join(os.path.dirname(here), '.git', 'HEAD')\nif os.path.isfile(p):\n with open(p) as f:\n __branch__ = f.readline().rstrip().split(r'/')[-1]\n if __branch__ != 'master':\n __version__ += '-' + __branch__\nelse:\n __branch__ = None\n",
"path": "WrightTools/__version__.py"
}
] | [
{
"content": "\"\"\"Define WrightTools version.\"\"\"\n\n\n# --- import --------------------------------------------------------------------------------------\n\n\nimport os\n\n\n# ---- define -------------------------------------------------------------------------------------\n\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\n\n__all__ = ['__version__', '__branch__']\n\n\n# --- version -------------------------------------------------------------------------------------\n\n\n# read from VERSION file\nwith open(os.path.join(os.path.dirname(here), 'VERSION')) as f:\n __version__ = f.read().strip()\n\n\n# add git branch, if appropriate\np = os.path.join(os.path.dirname(here), '.git', 'HEAD')\nif os.path.isfile(p):\n with open(p) as f:\n __branch__ = f.readline().rstrip().split(r'/')[-1]\n if __branch__ != 'master':\n __version__ += '+' + __branch__\nelse:\n __branch__ = None\n",
"path": "WrightTools/__version__.py"
}
] | diff --git a/WrightTools/__version__.py b/WrightTools/__version__.py
index 1820a0395..2d2685956 100644
--- a/WrightTools/__version__.py
+++ b/WrightTools/__version__.py
@@ -30,6 +30,6 @@
with open(p) as f:
__branch__ = f.readline().rstrip().split(r'/')[-1]
if __branch__ != 'master':
- __version__ += '-' + __branch__
+ __version__ += '+' + __branch__
else:
__branch__ = None
|
plotly__dash-601 | Extract meta failure on missing prop docstring.
If a prop is missing a docstring, component generation fails with the JS error `Cannot read property 'length' of undefined`.
https://community.plot.ly/t/dash-component-creation-javascript-ok-nothing-rendered-in-python/19369
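For illustration only, here is the failure mode and its guard mirrored in Python; the metadata entry below is a hypothetical react-docgen prop whose `description` key is absent:
```python
# Hypothetical prop entry emitted by react-docgen when the docstring is missing.
prop = {"type": {"name": "string"}, "required": False}

# Unguarded access fails, analogous to reading `.length` of `undefined` in JS:
#     len(prop["description"])  # KeyError

# Guarded access, mirroring the null check the JS extractor needs:
description = prop.get("description") or ""
if not description:
    print("Description for this prop is missing!")
```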
| [
{
"content": "from __future__ import print_function\nfrom collections import OrderedDict\n\nimport json\nimport sys\nimport subprocess\nimport shlex\nimport os\nimport argparse\nimport shutil\nimport functools\n\nimport pkg_resources\n\nfrom ._r_components_generation import write_class_file\nfrom ._r_components_generation import generate_exports\nfrom ._py_components_generation import generate_class_file\nfrom ._py_components_generation import generate_imports\nfrom ._py_components_generation import generate_classes_files\n\n\nclass _CombinedFormatter(argparse.ArgumentDefaultsHelpFormatter,\n argparse.RawDescriptionHelpFormatter):\n pass\n\n\n# pylint: disable=too-many-locals\ndef generate_components(components_source, project_shortname,\n package_info_filename='package.json',\n ignore='^_',\n rprefix=None):\n\n project_shortname = project_shortname.replace('-', '_').rstrip('/\\\\')\n\n if rprefix:\n prefix = rprefix\n\n is_windows = sys.platform == 'win32'\n\n extract_path = pkg_resources.resource_filename('dash', 'extract-meta.js')\n\n os.environ['NODE_PATH'] = 'node_modules'\n cmd = shlex.split(\n 'node {} {} {}'.format(extract_path, ignore, components_source),\n posix=not is_windows\n )\n\n shutil.copyfile('package.json',\n os.path.join(project_shortname, package_info_filename))\n\n proc = subprocess.Popen(cmd,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n shell=is_windows)\n out, err = proc.communicate()\n status = proc.poll()\n\n if err:\n print(err.decode(), file=sys.stderr)\n\n if not out:\n print(\n 'Error generating metadata in {} (status={})'.format(\n project_shortname, status),\n file=sys.stderr)\n sys.exit(1)\n\n jsondata_unicode = json.loads(out.decode(), object_pairs_hook=OrderedDict)\n\n if sys.version_info[0] >= 3:\n metadata = jsondata_unicode\n else:\n metadata = byteify(jsondata_unicode)\n\n generator_methods = [generate_class_file]\n\n if rprefix:\n if not os.path.exists('man'):\n os.makedirs('man')\n if not os.path.exists('R'):\n os.makedirs('R')\n generator_methods.append(\n functools.partial(write_class_file, prefix=prefix))\n\n components = generate_classes_files(\n project_shortname,\n metadata,\n *generator_methods\n )\n\n with open(os.path.join(project_shortname, 'metadata.json'), 'w') as f:\n json.dump(metadata, f)\n\n generate_imports(project_shortname, components)\n\n if rprefix:\n with open('package.json', 'r') as f:\n jsondata_unicode = json.load(f, object_pairs_hook=OrderedDict)\n if sys.version_info[0] >= 3:\n pkg_data = jsondata_unicode\n else:\n pkg_data = byteify(jsondata_unicode)\n\n generate_exports(\n project_shortname, components, metadata, pkg_data, prefix\n )\n\n\ndef cli():\n parser = argparse.ArgumentParser(\n prog='dash-generate-components',\n formatter_class=_CombinedFormatter,\n description='Generate dash components by extracting the metadata '\n 'using react-docgen. 
Then map the metadata to python classes.'\n )\n parser.add_argument('components_source',\n help='React components source directory.')\n parser.add_argument(\n 'project_shortname',\n help='Name of the project to export the classes files.'\n )\n parser.add_argument(\n '-p', '--package-info-filename',\n default='package.json',\n help='The filename of the copied `package.json` to `project_shortname`'\n )\n parser.add_argument(\n '-i', '--ignore',\n default='^_',\n help='Files/directories matching the pattern will be ignored'\n )\n parser.add_argument(\n '--r-prefix',\n help='Experimental: specify a prefix for DashR component names, write'\n 'DashR components to R dir, create R package.'\n )\n\n args = parser.parse_args()\n generate_components(\n args.components_source, args.project_shortname,\n package_info_filename=args.package_info_filename,\n ignore=args.ignore,\n rprefix=args.r_prefix)\n\n\n# pylint: disable=undefined-variable\ndef byteify(input_object):\n if isinstance(input_object, dict):\n return OrderedDict([\n (byteify(key), byteify(value))\n for key, value in input_object.iteritems()\n ])\n elif isinstance(input_object, list):\n return [byteify(element) for element in input_object]\n elif isinstance(input_object, unicode): # noqa:F821\n return input_object.encode('utf-8')\n return input_object\n\n\nif __name__ == '__main__':\n cli()\n",
"path": "dash/development/component_generator.py"
}
] | [
{
"content": "from __future__ import print_function\nfrom collections import OrderedDict\n\nimport json\nimport sys\nimport subprocess\nimport shlex\nimport os\nimport argparse\nimport shutil\nimport functools\n\nimport pkg_resources\n\nfrom ._r_components_generation import write_class_file\nfrom ._r_components_generation import generate_exports\nfrom ._py_components_generation import generate_class_file\nfrom ._py_components_generation import generate_imports\nfrom ._py_components_generation import generate_classes_files\n\n\nclass _CombinedFormatter(argparse.ArgumentDefaultsHelpFormatter,\n argparse.RawDescriptionHelpFormatter):\n pass\n\n\n# pylint: disable=too-many-locals\ndef generate_components(components_source, project_shortname,\n package_info_filename='package.json',\n ignore='^_',\n rprefix=None):\n\n project_shortname = project_shortname.replace('-', '_').rstrip('/\\\\')\n\n if rprefix:\n prefix = rprefix\n\n is_windows = sys.platform == 'win32'\n\n extract_path = pkg_resources.resource_filename('dash', 'extract-meta.js')\n\n os.environ['NODE_PATH'] = 'node_modules'\n cmd = shlex.split(\n 'node {} {} {}'.format(extract_path, ignore, components_source),\n posix=not is_windows\n )\n\n shutil.copyfile('package.json',\n os.path.join(project_shortname, package_info_filename))\n\n proc = subprocess.Popen(cmd,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n shell=is_windows)\n out, err = proc.communicate()\n status = proc.poll()\n\n if err:\n print(err.decode(), file=sys.stderr)\n\n if not out:\n print(\n 'Error generating metadata in {} (status={})'.format(\n project_shortname, status),\n file=sys.stderr)\n sys.exit(1)\n\n jsondata_unicode = json.loads(out.decode(), object_pairs_hook=OrderedDict)\n\n if sys.version_info[0] >= 3:\n metadata = jsondata_unicode\n else:\n metadata = byteify(jsondata_unicode)\n\n generator_methods = [generate_class_file]\n\n if rprefix:\n if not os.path.exists('man'):\n os.makedirs('man')\n if not os.path.exists('R'):\n os.makedirs('R')\n generator_methods.append(\n functools.partial(write_class_file, prefix=prefix))\n\n components = generate_classes_files(\n project_shortname,\n metadata,\n *generator_methods\n )\n\n with open(os.path.join(project_shortname, 'metadata.json'), 'w') as f:\n json.dump(metadata, f, indent=2)\n\n generate_imports(project_shortname, components)\n\n if rprefix:\n with open('package.json', 'r') as f:\n jsondata_unicode = json.load(f, object_pairs_hook=OrderedDict)\n if sys.version_info[0] >= 3:\n pkg_data = jsondata_unicode\n else:\n pkg_data = byteify(jsondata_unicode)\n\n generate_exports(\n project_shortname, components, metadata, pkg_data, prefix\n )\n\n\ndef cli():\n parser = argparse.ArgumentParser(\n prog='dash-generate-components',\n formatter_class=_CombinedFormatter,\n description='Generate dash components by extracting the metadata '\n 'using react-docgen. 
Then map the metadata to python classes.'\n )\n parser.add_argument('components_source',\n help='React components source directory.')\n parser.add_argument(\n 'project_shortname',\n help='Name of the project to export the classes files.'\n )\n parser.add_argument(\n '-p', '--package-info-filename',\n default='package.json',\n help='The filename of the copied `package.json` to `project_shortname`'\n )\n parser.add_argument(\n '-i', '--ignore',\n default='^_',\n help='Files/directories matching the pattern will be ignored'\n )\n parser.add_argument(\n '--r-prefix',\n help='Experimental: specify a prefix for DashR component names, write'\n 'DashR components to R dir, create R package.'\n )\n\n args = parser.parse_args()\n generate_components(\n args.components_source, args.project_shortname,\n package_info_filename=args.package_info_filename,\n ignore=args.ignore,\n rprefix=args.r_prefix)\n\n\n# pylint: disable=undefined-variable\ndef byteify(input_object):\n if isinstance(input_object, dict):\n return OrderedDict([\n (byteify(key), byteify(value))\n for key, value in input_object.iteritems()\n ])\n elif isinstance(input_object, list):\n return [byteify(element) for element in input_object]\n elif isinstance(input_object, unicode): # noqa:F821\n return input_object.encode('utf-8')\n return input_object\n\n\nif __name__ == '__main__':\n cli()\n",
"path": "dash/development/component_generator.py"
}
] | diff --git a/CHANGELOG.md b/CHANGELOG.md
index e90fbfb7d1..54efb82c0c 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,3 +1,8 @@
+## UNRELEASED
+## Fixed
+- Fix missing indentation for generated metadata.json [#600](https://github.com/plotly/dash/issues/600)
+- Fix missing component prop docstring error [#598](https://github.com/plotly/dash/issues/598)
+
## [0.37.0] - 2019-02-11
## Fixed
- Fixed collections.abc deprecation warning for python 3.8 [#563](https://github.com/plotly/dash/pull/563)
diff --git a/dash/development/component_generator.py b/dash/development/component_generator.py
index 99c19051a0..ed056ec2ae 100644
--- a/dash/development/component_generator.py
+++ b/dash/development/component_generator.py
@@ -89,7 +89,7 @@ def generate_components(components_source, project_shortname,
)
with open(os.path.join(project_shortname, 'metadata.json'), 'w') as f:
- json.dump(metadata, f)
+ json.dump(metadata, f, indent=2)
generate_imports(project_shortname, components)
diff --git a/dash/extract-meta.js b/dash/extract-meta.js
index 0040b479b4..f8571f08f1 100644
--- a/dash/extract-meta.js
+++ b/dash/extract-meta.js
@@ -42,7 +42,7 @@ function writeError(msg, filePath) {
}
function checkWarn(name, value) {
- if (value.length < 1 && !excludedDocProps.includes(name.split('.').pop())) {
+ if (!value || (value.length < 1 && !excludedDocProps.includes(name.split('.').pop()))) {
process.stderr.write(`\nDescription for ${name} is missing!\n`)
}
}
|
NVIDIA__NVFlare-2458 | [BUG] RuntimeError while running spleen_ct_segmentation_sim and spleen_ct_segmentation_local
```
2024-03-20 14:37:59,155 - ClientTaskWorker - INFO - Clean up ClientRunner for : site-1
2024-03-20 14:37:59,157 - nvflare.fuel.f3.sfm.conn_manager - INFO - Connection [CN00002 Not Connected] is closed PID: 71655
2024-03-20 14:37:59,157 - nvflare.fuel.f3.sfm.conn_manager - INFO - Connection [CN00004 Not Connected] is closed PID: 71550
2024-03-20 14:37:59,401 - CoreCell - ERROR - site-1.simulate_job.0: error stopping Communicator: RuntimeError: cannot join current thread
2024-03-20 14:37:59,402 - CoreCell - ERROR - Traceback (most recent call last):
File "/usr/local/lib/python3.10/dist-packages/nvflare/fuel/f3/cellnet/core_cell.py", line 899, in stop
self.communicator.stop()
File "/usr/local/lib/python3.10/dist-packages/nvflare/fuel/f3/communicator.py", line 84, in stop
self.conn_manager.stop()
File "/usr/local/lib/python3.10/dist-packages/nvflare/fuel/f3/sfm/conn_manager.py", line 155, in stop
self.frame_mgr_executor.shutdown(True)
File "/usr/lib/python3.10/concurrent/futures/thread.py", line 235, in shutdown
t.join()
File "/usr/lib/python3.10/threading.py", line 1093, in join
raise RuntimeError("cannot join current thread")
RuntimeError: cannot join current thread
2024-03-20 14:37:59,765 - SubWorkerExecutor - INFO - SubWorkerExecutor process shutdown.
2024-03-20 14:38:00,090 - SubWorkerExecutor - INFO - SubWorkerExecutor process shutdown.
2024-03-20 14:38:00,417 - SimulatorServer - INFO - Server app stopped.
```
The run command:
`nvflare simulator /opt/toolkit/tutorials/fl/spleen_ct_segmentation_sim/job_multi_gpu --workspace sim_spleen_ct_seg --threads 1 --n_clients 1`
nvflare version: 2.4.1rc1
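The traceback itself is independent of NVFlare: calling `ThreadPoolExecutor.shutdown(wait=True)` from inside one of the pool's own worker threads makes the pool try to join the current thread, which is exactly the error above. A minimal reproduction (plain Python, not NVFlare code):
```python
import concurrent.futures

pool = concurrent.futures.ThreadPoolExecutor(max_workers=1)

def stop_from_inside():
    # shutdown(wait=True) joins every pool thread, including this one,
    # so the join on the current thread raises RuntimeError.
    pool.shutdown(wait=True)

future = pool.submit(stop_from_inside)
print(future.exception())  # cannot join current thread
```
This suggests `Communicator.stop()` is being reached from a thread owned by `frame_mgr_executor` itself.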
| [
{
"content": "# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Sub_worker process to start the multi-processes client.\"\"\"\n\nimport argparse\nimport copy\nimport logging\nimport os\nimport sys\nimport threading\nimport time\n\nfrom nvflare.apis.event_type import EventType\nfrom nvflare.apis.executor import Executor\nfrom nvflare.apis.fl_component import FLComponent\nfrom nvflare.apis.fl_constant import FLContextKey\nfrom nvflare.apis.fl_context import FLContext\nfrom nvflare.apis.signal import Signal\nfrom nvflare.apis.utils.fl_context_utils import get_serializable_data\nfrom nvflare.apis.workspace import Workspace\nfrom nvflare.app_common.executors.multi_process_executor import WorkerComponentBuilder\nfrom nvflare.fuel.common.multi_process_executor_constants import (\n CommunicateData,\n CommunicationMetaData,\n MultiProcessCommandNames,\n)\nfrom nvflare.fuel.f3.cellnet.cell import Cell\nfrom nvflare.fuel.f3.cellnet.core_cell import Message as CellMessage\nfrom nvflare.fuel.f3.cellnet.core_cell import MessageHeaderKey, make_reply\nfrom nvflare.fuel.f3.cellnet.defs import ReturnCode\nfrom nvflare.fuel.f3.cellnet.fqcn import FQCN\nfrom nvflare.fuel.f3.cellnet.net_agent import NetAgent\nfrom nvflare.fuel.f3.mpm import MainProcessMonitor as mpm\nfrom nvflare.fuel.sec.audit import AuditService\nfrom nvflare.fuel.sec.security_content_service import SecurityContentService\nfrom nvflare.private.defs import CellChannel, CellChannelTopic, new_cell_message\nfrom nvflare.private.fed.app.fl_conf import create_privacy_manager\nfrom nvflare.private.fed.app.utils import monitor_parent_process\nfrom nvflare.private.fed.client.client_run_manager import ClientRunManager\nfrom nvflare.private.fed.runner import Runner\nfrom nvflare.private.fed.simulator.simulator_app_runner import SimulatorClientRunManager\nfrom nvflare.private.fed.utils.fed_utils import (\n add_logfile_handler,\n configure_logging,\n create_stats_pool_files_for_job,\n fobs_initialize,\n set_stats_pool_config_for_job,\n)\nfrom nvflare.private.privacy_manager import PrivacyService\n\n\nclass EventRelayer(FLComponent):\n \"\"\"To relay the event from the worker_process.\"\"\"\n\n def __init__(self, cell, parent_fqcn, local_rank):\n \"\"\"To init the EventRelayer.\n\n Args:\n cell: the local cell.\n parent_fqcn: FQCN of the parent cell\n local_rank: process local rank\n \"\"\"\n super().__init__()\n self.cell = cell\n self.parent_fqcn = parent_fqcn\n self.local_rank = local_rank\n\n self.event_lock = threading.Lock()\n self.start_run_fired = False\n\n def relay_event(self, run_manager, data):\n \"\"\"To relay the event.\n\n Args:\n run_manager: Client_Run_Manager\n data: event data\n\n \"\"\"\n with run_manager.new_context() as fl_ctx:\n event_type = data[CommunicationMetaData.EVENT_TYPE]\n if event_type == EventType.START_RUN:\n if self.start_run_fired:\n return\n else:\n self.start_run_fired = True\n fl_ctx.props.update(data[CommunicationMetaData.FL_CTX].props)\n\n 
fl_ctx.set_prop(\n FLContextKey.EVENT_ORIGIN_SITE, CommunicateData.MULTI_PROCESS_EXECUTOR, private=True, sticky=False\n )\n self.fire_event(event_type=event_type, fl_ctx=fl_ctx)\n\n def handle_event(self, event_type: str, fl_ctx: FLContext):\n \"\"\"To handle the event.\n\n Args:\n event_type: event_type\n fl_ctx: FLContext\n\n \"\"\"\n event_site = fl_ctx.get_prop(FLContextKey.EVENT_ORIGIN_SITE)\n\n new_fl_ctx = FLContext()\n new_fl_ctx.props.update(copy.deepcopy(get_serializable_data(fl_ctx).props))\n if event_site != CommunicateData.MULTI_PROCESS_EXECUTOR:\n with self.event_lock:\n try:\n data = {\n CommunicationMetaData.EVENT_TYPE: event_type,\n CommunicationMetaData.RANK_NUMBER: self.local_rank,\n CommunicationMetaData.FL_CTX: new_fl_ctx,\n }\n\n request = new_cell_message({}, data)\n return_data = self.cell.send_request(\n target=self.parent_fqcn,\n channel=CellChannel.MULTI_PROCESS_EXECUTOR,\n topic=CellChannelTopic.FIRE_EVENT,\n request=request,\n )\n # update the fl_ctx from the child process return data.\n fl_ctx.props.update(return_data.payload[CommunicationMetaData.FL_CTX].props)\n except Exception:\n self.log_warning(\n fl_ctx, f\"Failed to relay the event to parent process. Event: {event_type}\", fire_event=False\n )\n\n\nclass SubWorkerExecutor(Runner):\n def __init__(self, args, workspace, num_of_processes, local_rank) -> None:\n super().__init__()\n\n self.args = args\n self.workspace = workspace\n self.components = {}\n self.handlers = []\n self.executor = None\n self.run_manager = None\n self.num_of_processes = num_of_processes\n self.local_rank = local_rank\n\n self.done = False\n\n fqcn = FQCN.join([args.client_name, args.job_id, str(local_rank)])\n credentials = {}\n self.cell = Cell(\n fqcn=fqcn,\n root_url=args.root_url,\n secure=False,\n credentials=credentials,\n create_internal_listener=True,\n parent_url=args.parent_url,\n )\n self.cell.start()\n net_agent = NetAgent(self.cell)\n self.cell.register_request_cb(\n channel=CellChannel.CLIENT_SUB_WORKER_COMMAND,\n topic=\"*\",\n cb=self.execute_command,\n )\n mpm.add_cleanup_cb(net_agent.close)\n mpm.add_cleanup_cb(self.cell.stop)\n\n self.commands = {\n MultiProcessCommandNames.INITIALIZE: self._initialize,\n MultiProcessCommandNames.TASK_EXECUTION: self._execute_task,\n MultiProcessCommandNames.FIRE_EVENT: self._handle_event,\n MultiProcessCommandNames.CLOSE: self._close,\n }\n\n self.logger = logging.getLogger(self.__class__.__name__)\n\n def execute_command(self, request: CellMessage) -> CellMessage:\n command_name = request.get_header(MessageHeaderKey.TOPIC)\n data = request.payload\n\n if command_name not in self.commands:\n return make_reply(ReturnCode.INVALID_REQUEST, \"\", None)\n return self.commands[command_name](data)\n\n def _initialize(self, data):\n executor_id = data[CommunicationMetaData.LOCAL_EXECUTOR]\n components_conf = data[CommunicationMetaData.COMPONENTS]\n component_builder = WorkerComponentBuilder()\n for item in components_conf:\n cid = item.get(\"id\", None)\n if not cid:\n raise TypeError(\"missing component id\")\n self.components[cid] = component_builder.build_component(item)\n if isinstance(self.components[cid], FLComponent):\n self.handlers.append(self.components[cid])\n\n self.executor = self.components.get(executor_id, None)\n if not isinstance(self.executor, Executor):\n make_reply(\n ReturnCode.INVALID_REQUEST,\n \"invalid executor {}: expect Executor but got {}\".format(executor_id, type(self.executor)),\n None,\n )\n\n job_id = self.args.job_id\n 
self._get_client_run_manager(job_id)\n\n parent_fqcn = FQCN.join([self.args.client_name, self.args.job_id])\n relayer = EventRelayer(self.cell, parent_fqcn, self.local_rank)\n self.run_manager.add_handler(relayer)\n self.run_manager.components[CommunicationMetaData.RELAYER] = relayer\n\n with self.run_manager.new_context() as fl_ctx:\n fl_ctx.set_prop(FLContextKey.RANK_NUMBER, self.local_rank, private=True, sticky=True)\n fl_ctx.set_prop(FLContextKey.NUM_OF_PROCESSES, self.num_of_processes, private=True, sticky=True)\n\n event_data = {\n CommunicationMetaData.EVENT_TYPE: EventType.START_RUN,\n CommunicationMetaData.FL_CTX: data[CommunicationMetaData.FL_CTX],\n }\n relayer.relay_event(self.run_manager, event_data)\n\n return make_reply(ReturnCode.OK, \"\", None)\n\n def _get_client_run_manager(self, job_id):\n if self.args.simulator_engine.lower() == \"true\":\n self.run_manager = SimulatorClientRunManager(\n client_name=self.args.client_name,\n job_id=job_id,\n workspace=self.workspace,\n client=None,\n components=self.components,\n handlers=self.handlers,\n conf=None,\n )\n else:\n self.run_manager = ClientRunManager(\n client_name=self.args.client_name,\n job_id=job_id,\n workspace=self.workspace,\n client=None,\n components=self.components,\n handlers=self.handlers,\n conf=None,\n )\n\n def _execute_task(self, data):\n \"\"\"To execute the event task and pass to worker_process.\n\n Args:\n\n \"\"\"\n with self.run_manager.new_context() as fl_ctx:\n abort_signal = Signal()\n\n task_name = data[CommunicationMetaData.TASK_NAME]\n shareable = data[CommunicationMetaData.SHAREABLE]\n fl_ctx.props.update(data[CommunicationMetaData.FL_CTX].props)\n\n shareable = self.executor.execute(\n task_name=task_name, shareable=shareable, fl_ctx=fl_ctx, abort_signal=abort_signal\n )\n\n if self.local_rank == 0:\n return_data = {\n CommunicationMetaData.SHAREABLE: shareable,\n CommunicationMetaData.FL_CTX: get_serializable_data(fl_ctx),\n }\n request = new_cell_message({}, return_data)\n fqcn = FQCN.join([self.args.client_name, self.args.job_id])\n self.cell.send_request(\n target=fqcn,\n channel=CellChannel.MULTI_PROCESS_EXECUTOR,\n topic=CellChannelTopic.EXECUTE_RESULT,\n request=request,\n )\n\n def _handle_event(self, data):\n \"\"\"To handle the event.\n\n Args:\n\n \"\"\"\n event_relayer = self.run_manager.get_component(CommunicationMetaData.RELAYER)\n event_relayer.relay_event(self.run_manager, data)\n\n def _close(self, data):\n self.done = True\n self.cell.stop()\n # mpm.stop()\n\n def run(self):\n self.logger.info(\"SubWorkerExecutor process started.\")\n while not self.done:\n time.sleep(1.0)\n # self.cell.run()\n # mpm.run(\"Client sub_worker\")\n self.logger.info(\"SubWorkerExecutor process shutdown.\")\n\n def stop(self):\n self.done = True\n\n\ndef main(args):\n workspace = Workspace(args.workspace, args.client_name)\n app_custom_folder = workspace.get_client_custom_dir()\n if os.path.isdir(app_custom_folder):\n sys.path.append(app_custom_folder)\n configure_logging(workspace)\n\n fobs_initialize()\n\n SecurityContentService.initialize(content_folder=workspace.get_startup_kit_dir())\n\n # Initialize audit service since the job execution will need it!\n AuditService.initialize(workspace.get_audit_file_path())\n\n # configure privacy control!\n privacy_manager = create_privacy_manager(workspace, names_only=True)\n\n # initialize Privacy Service\n PrivacyService.initialize(privacy_manager)\n\n local_rank = int(os.environ[\"LOCAL_RANK\"])\n prefix = f\"rank{local_rank}\"\n 
set_stats_pool_config_for_job(workspace, args.job_id, prefix=prefix)\n\n num_of_processes = int(args.num_processes)\n sub_executor = SubWorkerExecutor(args, workspace, num_of_processes, local_rank)\n\n # start parent process checking thread\n parent_pid = args.parent_pid\n stop_event = threading.Event()\n thread = threading.Thread(target=monitor_parent_process, args=(sub_executor, parent_pid, stop_event))\n thread.start()\n\n job_id = args.job_id\n log_file = workspace.get_app_log_file_path(job_id)\n add_logfile_handler(log_file)\n logger = logging.getLogger(\"sub_worker_process\")\n\n sub_executor.run()\n\n AuditService.close()\n err = create_stats_pool_files_for_job(workspace, job_id, prefix=prefix)\n if err:\n logger.warning(err)\n\n\ndef parse_arguments():\n \"\"\"Sub_worker process program.\"\"\"\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--workspace\", \"-m\", type=str, help=\"WORKSPACE folder\", required=True)\n parser.add_argument(\"--num_processes\", type=str, help=\"Listen ports\", required=True)\n parser.add_argument(\"--job_id\", \"-n\", type=str, help=\"job_id\", required=True)\n parser.add_argument(\"--client_name\", \"-c\", type=str, help=\"client name\", required=True)\n parser.add_argument(\"--simulator_engine\", \"-s\", type=str, help=\"simulator engine\", required=True)\n parser.add_argument(\"--parent_pid\", type=int, help=\"parent process pid\", required=True)\n parser.add_argument(\"--root_url\", type=str, help=\"root cell url\", required=True)\n parser.add_argument(\"--parent_url\", type=str, help=\"parent cell url\", required=True)\n args = parser.parse_args()\n return args\n\n\nif __name__ == \"__main__\":\n \"\"\"\n This is the program for running rank processes in multi-process mode.\n \"\"\"\n # main()\n args = parse_arguments()\n run_dir = os.path.join(args.workspace, args.job_id)\n mpm.run(main_func=main, run_dir=run_dir, args=args)\n",
"path": "nvflare/private/fed/app/client/sub_worker_process.py"
}
] | [
{
"content": "# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Sub_worker process to start the multi-processes client.\"\"\"\n\nimport argparse\nimport copy\nimport logging\nimport os\nimport sys\nimport threading\nimport time\n\nfrom nvflare.apis.event_type import EventType\nfrom nvflare.apis.executor import Executor\nfrom nvflare.apis.fl_component import FLComponent\nfrom nvflare.apis.fl_constant import FLContextKey\nfrom nvflare.apis.fl_context import FLContext\nfrom nvflare.apis.signal import Signal\nfrom nvflare.apis.utils.fl_context_utils import get_serializable_data\nfrom nvflare.apis.workspace import Workspace\nfrom nvflare.app_common.executors.multi_process_executor import WorkerComponentBuilder\nfrom nvflare.fuel.common.multi_process_executor_constants import (\n CommunicateData,\n CommunicationMetaData,\n MultiProcessCommandNames,\n)\nfrom nvflare.fuel.f3.cellnet.cell import Cell\nfrom nvflare.fuel.f3.cellnet.core_cell import Message as CellMessage\nfrom nvflare.fuel.f3.cellnet.core_cell import MessageHeaderKey, make_reply\nfrom nvflare.fuel.f3.cellnet.defs import ReturnCode\nfrom nvflare.fuel.f3.cellnet.fqcn import FQCN\nfrom nvflare.fuel.f3.cellnet.net_agent import NetAgent\nfrom nvflare.fuel.f3.mpm import MainProcessMonitor as mpm\nfrom nvflare.fuel.sec.audit import AuditService\nfrom nvflare.fuel.sec.security_content_service import SecurityContentService\nfrom nvflare.private.defs import CellChannel, CellChannelTopic, new_cell_message\nfrom nvflare.private.fed.app.fl_conf import create_privacy_manager\nfrom nvflare.private.fed.app.utils import monitor_parent_process\nfrom nvflare.private.fed.client.client_run_manager import ClientRunManager\nfrom nvflare.private.fed.runner import Runner\nfrom nvflare.private.fed.simulator.simulator_app_runner import SimulatorClientRunManager\nfrom nvflare.private.fed.utils.fed_utils import (\n add_logfile_handler,\n configure_logging,\n create_stats_pool_files_for_job,\n fobs_initialize,\n set_stats_pool_config_for_job,\n)\nfrom nvflare.private.privacy_manager import PrivacyService\n\n\nclass EventRelayer(FLComponent):\n \"\"\"To relay the event from the worker_process.\"\"\"\n\n def __init__(self, cell, parent_fqcn, local_rank):\n \"\"\"To init the EventRelayer.\n\n Args:\n cell: the local cell.\n parent_fqcn: FQCN of the parent cell\n local_rank: process local rank\n \"\"\"\n super().__init__()\n self.cell = cell\n self.parent_fqcn = parent_fqcn\n self.local_rank = local_rank\n\n self.event_lock = threading.Lock()\n self.start_run_fired = False\n\n def relay_event(self, run_manager, data):\n \"\"\"To relay the event.\n\n Args:\n run_manager: Client_Run_Manager\n data: event data\n\n \"\"\"\n with run_manager.new_context() as fl_ctx:\n event_type = data[CommunicationMetaData.EVENT_TYPE]\n if event_type == EventType.START_RUN:\n if self.start_run_fired:\n return\n else:\n self.start_run_fired = True\n fl_ctx.props.update(data[CommunicationMetaData.FL_CTX].props)\n\n 
fl_ctx.set_prop(\n FLContextKey.EVENT_ORIGIN_SITE, CommunicateData.MULTI_PROCESS_EXECUTOR, private=True, sticky=False\n )\n self.fire_event(event_type=event_type, fl_ctx=fl_ctx)\n\n def handle_event(self, event_type: str, fl_ctx: FLContext):\n \"\"\"To handle the event.\n\n Args:\n event_type: event_type\n fl_ctx: FLContext\n\n \"\"\"\n event_site = fl_ctx.get_prop(FLContextKey.EVENT_ORIGIN_SITE)\n\n new_fl_ctx = FLContext()\n new_fl_ctx.props.update(copy.deepcopy(get_serializable_data(fl_ctx).props))\n if event_site != CommunicateData.MULTI_PROCESS_EXECUTOR:\n with self.event_lock:\n try:\n data = {\n CommunicationMetaData.EVENT_TYPE: event_type,\n CommunicationMetaData.RANK_NUMBER: self.local_rank,\n CommunicationMetaData.FL_CTX: new_fl_ctx,\n }\n\n request = new_cell_message({}, data)\n return_data = self.cell.send_request(\n target=self.parent_fqcn,\n channel=CellChannel.MULTI_PROCESS_EXECUTOR,\n topic=CellChannelTopic.FIRE_EVENT,\n request=request,\n )\n # update the fl_ctx from the child process return data.\n fl_ctx.props.update(return_data.payload[CommunicationMetaData.FL_CTX].props)\n except Exception:\n self.log_warning(\n fl_ctx, f\"Failed to relay the event to parent process. Event: {event_type}\", fire_event=False\n )\n\n\nclass SubWorkerExecutor(Runner):\n def __init__(self, args, workspace, num_of_processes, local_rank) -> None:\n super().__init__()\n\n self.args = args\n self.workspace = workspace\n self.components = {}\n self.handlers = []\n self.executor = None\n self.run_manager = None\n self.num_of_processes = num_of_processes\n self.local_rank = local_rank\n\n self.done = False\n\n fqcn = FQCN.join([args.client_name, args.job_id, str(local_rank)])\n credentials = {}\n self.cell = Cell(\n fqcn=fqcn,\n root_url=args.root_url,\n secure=False,\n credentials=credentials,\n create_internal_listener=True,\n parent_url=args.parent_url,\n )\n self.cell.start()\n net_agent = NetAgent(self.cell)\n self.cell.register_request_cb(\n channel=CellChannel.CLIENT_SUB_WORKER_COMMAND,\n topic=\"*\",\n cb=self.execute_command,\n )\n mpm.add_cleanup_cb(net_agent.close)\n mpm.add_cleanup_cb(self.cell.stop)\n\n self.commands = {\n MultiProcessCommandNames.INITIALIZE: self._initialize,\n MultiProcessCommandNames.TASK_EXECUTION: self._execute_task,\n MultiProcessCommandNames.FIRE_EVENT: self._handle_event,\n MultiProcessCommandNames.CLOSE: self._close,\n }\n\n self.logger = logging.getLogger(self.__class__.__name__)\n\n def execute_command(self, request: CellMessage) -> CellMessage:\n command_name = request.get_header(MessageHeaderKey.TOPIC)\n data = request.payload\n\n if command_name not in self.commands:\n return make_reply(ReturnCode.INVALID_REQUEST, \"\", None)\n return self.commands[command_name](data)\n\n def _initialize(self, data):\n executor_id = data[CommunicationMetaData.LOCAL_EXECUTOR]\n components_conf = data[CommunicationMetaData.COMPONENTS]\n component_builder = WorkerComponentBuilder()\n for item in components_conf:\n cid = item.get(\"id\", None)\n if not cid:\n raise TypeError(\"missing component id\")\n self.components[cid] = component_builder.build_component(item)\n if isinstance(self.components[cid], FLComponent):\n self.handlers.append(self.components[cid])\n\n self.executor = self.components.get(executor_id, None)\n if not isinstance(self.executor, Executor):\n make_reply(\n ReturnCode.INVALID_REQUEST,\n \"invalid executor {}: expect Executor but got {}\".format(executor_id, type(self.executor)),\n None,\n )\n\n job_id = self.args.job_id\n 
self._get_client_run_manager(job_id)\n\n parent_fqcn = FQCN.join([self.args.client_name, self.args.job_id])\n relayer = EventRelayer(self.cell, parent_fqcn, self.local_rank)\n self.run_manager.add_handler(relayer)\n self.run_manager.components[CommunicationMetaData.RELAYER] = relayer\n\n with self.run_manager.new_context() as fl_ctx:\n fl_ctx.set_prop(FLContextKey.RANK_NUMBER, self.local_rank, private=True, sticky=True)\n fl_ctx.set_prop(FLContextKey.NUM_OF_PROCESSES, self.num_of_processes, private=True, sticky=True)\n\n event_data = {\n CommunicationMetaData.EVENT_TYPE: EventType.START_RUN,\n CommunicationMetaData.FL_CTX: data[CommunicationMetaData.FL_CTX],\n }\n relayer.relay_event(self.run_manager, event_data)\n\n return make_reply(ReturnCode.OK, \"\", None)\n\n def _get_client_run_manager(self, job_id):\n if self.args.simulator_engine.lower() == \"true\":\n self.run_manager = SimulatorClientRunManager(\n client_name=self.args.client_name,\n job_id=job_id,\n workspace=self.workspace,\n client=None,\n components=self.components,\n handlers=self.handlers,\n conf=None,\n )\n else:\n self.run_manager = ClientRunManager(\n client_name=self.args.client_name,\n job_id=job_id,\n workspace=self.workspace,\n client=None,\n components=self.components,\n handlers=self.handlers,\n conf=None,\n )\n\n def _execute_task(self, data):\n \"\"\"To execute the event task and pass to worker_process.\n\n Args:\n\n \"\"\"\n with self.run_manager.new_context() as fl_ctx:\n abort_signal = Signal()\n\n task_name = data[CommunicationMetaData.TASK_NAME]\n shareable = data[CommunicationMetaData.SHAREABLE]\n fl_ctx.props.update(data[CommunicationMetaData.FL_CTX].props)\n\n shareable = self.executor.execute(\n task_name=task_name, shareable=shareable, fl_ctx=fl_ctx, abort_signal=abort_signal\n )\n\n if self.local_rank == 0:\n return_data = {\n CommunicationMetaData.SHAREABLE: shareable,\n CommunicationMetaData.FL_CTX: get_serializable_data(fl_ctx),\n }\n request = new_cell_message({}, return_data)\n fqcn = FQCN.join([self.args.client_name, self.args.job_id])\n self.cell.send_request(\n target=fqcn,\n channel=CellChannel.MULTI_PROCESS_EXECUTOR,\n topic=CellChannelTopic.EXECUTE_RESULT,\n request=request,\n )\n\n def _handle_event(self, data):\n \"\"\"To handle the event.\n\n Args:\n\n \"\"\"\n event_relayer = self.run_manager.get_component(CommunicationMetaData.RELAYER)\n event_relayer.relay_event(self.run_manager, data)\n\n def _close(self, data):\n self.done = True\n\n def run(self):\n self.logger.info(\"SubWorkerExecutor process started.\")\n while not self.done:\n time.sleep(1.0)\n # self.cell.run()\n # mpm.run(\"Client sub_worker\")\n self.logger.info(\"SubWorkerExecutor process shutdown.\")\n\n def stop(self):\n self.done = True\n\n\ndef main(args):\n workspace = Workspace(args.workspace, args.client_name)\n app_custom_folder = workspace.get_client_custom_dir()\n if os.path.isdir(app_custom_folder):\n sys.path.append(app_custom_folder)\n configure_logging(workspace)\n\n fobs_initialize()\n\n SecurityContentService.initialize(content_folder=workspace.get_startup_kit_dir())\n\n # Initialize audit service since the job execution will need it!\n AuditService.initialize(workspace.get_audit_file_path())\n\n # configure privacy control!\n privacy_manager = create_privacy_manager(workspace, names_only=True)\n\n # initialize Privacy Service\n PrivacyService.initialize(privacy_manager)\n\n local_rank = int(os.environ[\"LOCAL_RANK\"])\n prefix = f\"rank{local_rank}\"\n set_stats_pool_config_for_job(workspace, args.job_id, 
prefix=prefix)\n\n num_of_processes = int(args.num_processes)\n sub_executor = SubWorkerExecutor(args, workspace, num_of_processes, local_rank)\n\n # start parent process checking thread\n parent_pid = args.parent_pid\n stop_event = threading.Event()\n thread = threading.Thread(target=monitor_parent_process, args=(sub_executor, parent_pid, stop_event))\n thread.start()\n\n job_id = args.job_id\n log_file = workspace.get_app_log_file_path(job_id)\n add_logfile_handler(log_file)\n logger = logging.getLogger(\"sub_worker_process\")\n\n sub_executor.run()\n\n AuditService.close()\n err = create_stats_pool_files_for_job(workspace, job_id, prefix=prefix)\n if err:\n logger.warning(err)\n\n\ndef parse_arguments():\n \"\"\"Sub_worker process program.\"\"\"\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--workspace\", \"-m\", type=str, help=\"WORKSPACE folder\", required=True)\n parser.add_argument(\"--num_processes\", type=str, help=\"Listen ports\", required=True)\n parser.add_argument(\"--job_id\", \"-n\", type=str, help=\"job_id\", required=True)\n parser.add_argument(\"--client_name\", \"-c\", type=str, help=\"client name\", required=True)\n parser.add_argument(\"--simulator_engine\", \"-s\", type=str, help=\"simulator engine\", required=True)\n parser.add_argument(\"--parent_pid\", type=int, help=\"parent process pid\", required=True)\n parser.add_argument(\"--root_url\", type=str, help=\"root cell url\", required=True)\n parser.add_argument(\"--parent_url\", type=str, help=\"parent cell url\", required=True)\n args = parser.parse_args()\n return args\n\n\nif __name__ == \"__main__\":\n \"\"\"\n This is the program for running rank processes in multi-process mode.\n \"\"\"\n # main()\n args = parse_arguments()\n run_dir = os.path.join(args.workspace, args.job_id)\n mpm.run(main_func=main, run_dir=run_dir, args=args)\n",
"path": "nvflare/private/fed/app/client/sub_worker_process.py"
}
] | diff --git a/nvflare/private/fed/app/client/sub_worker_process.py b/nvflare/private/fed/app/client/sub_worker_process.py
index 9ad491f23b..ec911961bf 100644
--- a/nvflare/private/fed/app/client/sub_worker_process.py
+++ b/nvflare/private/fed/app/client/sub_worker_process.py
@@ -294,8 +294,6 @@ def _handle_event(self, data):
def _close(self, data):
self.done = True
- self.cell.stop()
- # mpm.stop()
def run(self):
self.logger.info("SubWorkerExecutor process started.")
|
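Editor's note on the diff above: the patch removes the explicit `self.cell.stop()` from `_close`, leaving only the `done` flag. This is consistent with the constructor, which already registers `mpm.add_cleanup_cb(net_agent.close)` and `mpm.add_cleanup_cb(self.cell.stop)`, so the cell is stopped once during process cleanup rather than from inside one of its own request callbacks. A minimal sketch of the resulting shutdown sequence (that mpm runs its cleanup callbacks after `run()` returns is an assumption, not confirmed by the excerpt):

```python
import time

class SubWorkerExecutor(Runner):  # abridged; see the full class above
    def _close(self, data):
        # Only flip the flag; do not stop the cell from inside one of
        # its own request callbacks.
        self.done = True

    def run(self):
        self.logger.info("SubWorkerExecutor process started.")
        while not self.done:  # exits once _close() has set the flag
            time.sleep(1.0)
        self.logger.info("SubWorkerExecutor process shutdown.")

# After run() returns, mpm's cleanup callbacks (registered in __init__)
# stop the messaging layer:
#   mpm.add_cleanup_cb(net_agent.close)
#   mpm.add_cleanup_cb(self.cell.stop)
```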
comic__grand-challenge.org-2133 | Video maximize button not working in Chrome and Edge
The video maximize button (see [here](https://grand-challenge.org/documentation/create-your-own-algorithm/)) does not work in Chrome and Edge: it is greyed out, so maximizing is not possible.
The button does work in Safari and Firefox, so this appears to be a browser-specific issue.
Could this be fixed with a browser-specific attribute in the markdown or with some custom CSS?
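A plausible culprit is visible in `app/config/settings.py` below: Chromium-based browsers enforce the `Permissions-Policy` response header, and the site's `PERMISSIONS_POLICY` dict (rendered into that header by `django_permissions_policy.PermissionsPolicyMiddleware`, which is present in `MIDDLEWARE`) sets `"fullscreen": []`, denying the fullscreen feature to every origin. That greys out the fullscreen control on `<video controls>` in Chrome and Edge, and would explain why Safari and Firefox, which did not enforce this directive the same way, are unaffected. A minimal sketch of the fix, matching the setting change in the updated file:

```python
# app/config/settings.py (sketch)
# django-permissions-policy's PermissionsPolicyMiddleware serializes this
# dict into the Permissions-Policy response header.
PERMISSIONS_POLICY = {
    # ... other features stay denied with empty allowlists ...
    # An empty list denies fullscreen to all origins, which disables the
    # maximize button on <video controls> in Chromium-based browsers.
    # Allow it for same-origin documents instead:
    "fullscreen": ["self"],
    # ...
}
```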
| [
{
"content": "import os\nimport re\nfrom datetime import datetime, timedelta\nfrom distutils.util import strtobool as strtobool_i\nfrom itertools import product\n\nimport sentry_sdk\nfrom disposable_email_domains import blocklist\nfrom django.contrib.messages import constants as messages\nfrom django.urls import reverse\nfrom machina import MACHINA_MAIN_STATIC_DIR, MACHINA_MAIN_TEMPLATE_DIR\nfrom sentry_sdk.integrations.celery import CeleryIntegration\nfrom sentry_sdk.integrations.django import DjangoIntegration\nfrom sentry_sdk.integrations.logging import ignore_logger\n\nfrom config.denylist import USERNAME_DENYLIST\nfrom grandchallenge.algorithms.exceptions import ImageImportError\nfrom grandchallenge.components.exceptions import PriorStepFailed\nfrom grandchallenge.core.utils.markdown import BS4Extension\n\n\ndef strtobool(val) -> bool:\n \"\"\"Return disutils.util.strtobool as a boolean.\"\"\"\n return bool(strtobool_i(val))\n\n\nDEBUG = strtobool(os.environ.get(\"DEBUG\", \"False\"))\n\nCOMMIT_ID = os.environ.get(\"COMMIT_ID\", \"unknown\")\n\nADMINS = (\n # ('Your Name', '[email protected]'),\n)\n\n# Who gets the 404 notifications?\nmanager_email = os.environ.get(\"MANAGER_EMAIL\", None)\nif manager_email:\n MANAGERS = [(\"Manager\", manager_email)]\n\nIGNORABLE_404_URLS = [\n re.compile(r\".*\\.(php|cgi|asp).*\"),\n re.compile(r\"^/phpmyadmin.*\"),\n re.compile(r\"^/gen204.*\"),\n re.compile(r\"^/wp-content.*\"),\n re.compile(r\"^/wp.*\"),\n re.compile(r\"^/wordpress/.*\"),\n re.compile(r\"^/old/.*\", flags=re.IGNORECASE),\n re.compile(r\".*/trackback.*\"),\n re.compile(r\"^/site/.*\"),\n re.compile(r\"^/media/cache/.*\"),\n re.compile(r\"^/favicon.ico$\"),\n]\n\n# Used as starting points for various other paths. realpath(__file__) starts in\n# the config dir. We need to go one dir higher so path.join(\"..\")\nSITE_ROOT = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))\n\nDATABASES = {\n \"default\": {\n \"ENGINE\": \"django.db.backends.postgresql_psycopg2\",\n \"NAME\": os.environ.get(\"POSTGRES_DB\", \"grandchallenge\"),\n \"USER\": os.environ.get(\"POSTGRES_USER\", \"grandchallenge\"),\n \"PASSWORD\": os.environ.get(\"POSTGRES_PASSWORD\", \"secretpassword\"),\n \"HOST\": os.environ.get(\"POSTGRES_HOST\", \"postgres\"),\n \"PORT\": os.environ.get(\"POSTGRES_PORT\", \"\"),\n \"OPTIONS\": {\n \"sslmode\": os.environ.get(\"POSTGRES_SSL_MODE\", \"prefer\"),\n \"sslrootcert\": os.path.join(\n SITE_ROOT, \"config\", \"certs\", \"rds-ca-2019-root.pem\"\n ),\n },\n \"ATOMIC_REQUESTS\": strtobool(\n os.environ.get(\"ATOMIC_REQUESTS\", \"True\")\n ),\n }\n}\n\nEMAIL_BACKEND = \"djcelery_email.backends.CeleryEmailBackend\"\nCELERY_EMAIL_BACKEND = \"django_ses.SESBackend\"\nDEFAULT_FROM_EMAIL = os.environ.get(\n \"DEFAULT_FROM_EMAIL\", \"webmaster@localhost\"\n)\nSERVER_EMAIL = os.environ.get(\"SERVER_EMAIL\", \"root@localhost\")\n\nANONYMOUS_USER_NAME = \"AnonymousUser\"\nREGISTERED_USERS_GROUP_NAME = \"__registered_users_group__\"\nREGISTERED_AND_ANON_USERS_GROUP_NAME = \"__registered_and_anonymous_users__\"\n\n# Local time zone for this installation. Choices can be found here:\n# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name\n# although not all choices may be available on all operating systems.\n# On Unix systems, a value of None will cause Django to use the same\n# timezone as the operating system.\n# If running in a Windows environment this must be set to the same as your\n# system time zone.\nTIME_ZONE = \"UTC\"\n\n# Language code for this installation. 
All choices can be found here:\n# http://www.i18nguy.com/unicode/language-identifiers.html\nLANGUAGE_CODE = \"en-us\"\n\nSITE_ID = int(os.environ.get(\"SITE_ID\", \"1\"))\n\n# If you set this to False, Django will make some optimizations so as not\n# to load the internationalization machinery.\nUSE_I18N = True\n\n# If you set this to False, Django will not format dates, numbers and\n# calendars according to the current locale.\nUSE_L10N = True\n\n# If you set this to False, Django will not use timezone-aware datetimes.\nUSE_TZ = True\n\n# General forum\nDOCUMENTATION_HELP_FORUM_PK = os.environ.get(\n \"DOCUMENTATION_HELP_FORUM_PK\", \"1\"\n)\nDOCUMENTATION_HELP_FORUM_SLUG = os.environ.get(\n \"DOCUMENTATION_HELP_FORUM_SLUG\", \"general\"\n)\n\n# About Flatpage\nFLATPAGE_ABOUT_URL = os.environ.get(\"FLATPAGE_ABOUT_URL\", \"/about/\")\n\n##############################################################################\n#\n# Storage\n#\n##############################################################################\nDEFAULT_FILE_STORAGE = \"grandchallenge.core.storage.PublicS3Storage\"\n\n# Subdirectories on root for various files\nJQFILEUPLOAD_UPLOAD_SUBIDRECTORY = \"jqfileupload\"\nIMAGE_FILES_SUBDIRECTORY = \"images\"\nEVALUATION_FILES_SUBDIRECTORY = \"evaluation\"\nCOMPONENTS_FILES_SUBDIRECTORY = \"components\"\n\nAWS_S3_FILE_OVERWRITE = False\n# Note: deprecated in django storages 2.0\nAWS_BUCKET_ACL = \"private\"\nAWS_DEFAULT_ACL = \"private\"\nAWS_S3_MAX_MEMORY_SIZE = 1_048_576 # 100 MB\nAWS_S3_ENDPOINT_URL = os.environ.get(\"AWS_S3_ENDPOINT_URL\", None)\nAWS_DEFAULT_REGION = os.environ.get(\"AWS_DEFAULT_REGION\", \"eu-central-1\")\nAWS_SES_REGION_ENDPOINT = f\"email.{AWS_DEFAULT_REGION}.amazonaws.com\"\n\n# This is for storing files that should not be served to the public\nPRIVATE_S3_STORAGE_KWARGS = {\n \"bucket_name\": os.environ.get(\n \"PRIVATE_S3_STORAGE_BUCKET_NAME\", \"grand-challenge-private\"\n ),\n}\n\nPROTECTED_S3_STORAGE_KWARGS = {\n \"bucket_name\": os.environ.get(\n \"PROTECTED_S3_STORAGE_BUCKET_NAME\", \"grand-challenge-protected\"\n ),\n # This is the domain where people will be able to go to download data\n # from this bucket. 
Usually we would use reverse to find this out,\n # but this needs to be defined before the database is populated\n \"custom_domain\": os.environ.get(\n \"PROTECTED_S3_CUSTOM_DOMAIN\", \"gc.localhost/media\"\n ),\n}\nPROTECTED_S3_STORAGE_USE_CLOUDFRONT = strtobool(\n os.environ.get(\"PROTECTED_S3_STORAGE_USE_CLOUDFRONT\", \"False\")\n)\nPROTECTED_S3_STORAGE_CLOUDFRONT_DOMAIN = os.environ.get(\n \"PROTECTED_S3_STORAGE_CLOUDFRONT_DOMAIN_NAME\", \"\"\n)\n\nPUBLIC_S3_STORAGE_KWARGS = {\n \"bucket_name\": os.environ.get(\n \"PUBLIC_S3_STORAGE_BUCKET_NAME\", \"grand-challenge-public\"\n ),\n # Public bucket so do not use querystring_auth\n \"querystring_auth\": False,\n \"default_acl\": \"public-read\",\n}\n\nUPLOADS_S3_BUCKET_NAME = os.environ.get(\n \"UPLOADS_S3_BUCKET_NAME\", \"grand-challenge-uploads\"\n)\nUPLOADS_S3_USE_ACCELERATE_ENDPOINT = strtobool(\n os.environ.get(\"UPLOADS_S3_USE_ACCELERATE_ENDPOINT\", \"False\")\n)\nUPLOADS_MAX_SIZE_UNVERIFIED = int(\n os.environ.get(\"UPLOADS_MAX_SIZE_UNVERIFIED\", 2 * 1024 * 1024 * 1024)\n)\nUPLOADS_MAX_SIZE_VERIFIED = int(\n os.environ.get(\"UPLOADS_MAX_SIZE_VERIFIED\", 128 * 1024 * 1024 * 1024)\n)\n\n# Key pair used for signing CloudFront URLS, only used if\n# PROTECTED_S3_STORAGE_USE_CLOUDFRONT is True\nCLOUDFRONT_KEY_PAIR_ID = os.environ.get(\"CLOUDFRONT_KEY_PAIR_ID\", \"\")\nCLOUDFRONT_PRIVATE_KEY_BASE64 = os.environ.get(\n \"CLOUDFRONT_PRIVATE_KEY_BASE64\", \"\"\n)\nCLOUDFRONT_URL_EXPIRY_SECONDS = int(\n os.environ.get(\"CLOUDFRONT_URL_EXPIRY_SECONDS\", \"300\") # 5 mins\n)\n\n##############################################################################\n#\n# Caching\n#\n##############################################################################\nREDIS_HOSTNAME = os.environ.get(\"REDIS_HOSTNAME\", \"redis\")\n\nCACHES = {\n \"default\": {\n \"BACKEND\": \"django_redis.cache.RedisCache\",\n \"LOCATION\": f\"redis://{REDIS_HOSTNAME}:6379/1\",\n \"OPTIONS\": {\"CLIENT_CLASS\": \"django_redis.client.DefaultClient\"},\n },\n \"machina_attachments\": {\n \"BACKEND\": \"django.core.cache.backends.filebased.FileBasedCache\",\n \"LOCATION\": \"/tmp\",\n },\n}\n\nROOT_URLCONF = \"config.urls.root\"\nCHALLENGE_SUBDOMAIN_URL_CONF = \"config.urls.challenge_subdomain\"\nRENDERING_SUBDOMAIN_URL_CONF = \"config.urls.rendering_subdomain\"\nDEFAULT_SCHEME = os.environ.get(\"DEFAULT_SCHEME\", \"https\")\n\n# Workaround for https://github.com/ellmetha/django-machina/issues/219\nABSOLUTE_URL_OVERRIDES = {\n \"forum.forum\": lambda o: reverse(\n \"forum:forum\", kwargs={\"slug\": o.slug, \"pk\": o.pk},\n ),\n \"forum_conversation.topic\": lambda o: reverse(\n \"forum_conversation:topic\",\n kwargs={\n \"slug\": o.slug,\n \"pk\": o.pk,\n \"forum_slug\": o.forum.slug,\n \"forum_pk\": o.forum.pk,\n },\n ),\n}\n\nSESSION_COOKIE_DOMAIN = os.environ.get(\n \"SESSION_COOKIE_DOMAIN\", \".gc.localhost\"\n)\n# We're always running behind a proxy so set these to true\nSESSION_COOKIE_SECURE = True\nCSRF_COOKIE_SECURE = True\n# Trust all subdomains for CSRF, used for jqfileupload. 
Changed the name\n# of the CSRF token as existing ones are already in use.\nCSRF_COOKIE_DOMAIN = SESSION_COOKIE_DOMAIN\nCSRF_COOKIE_NAME = \"_csrftoken\"\nCSRF_TRUSTED_ORIGINS = [\n SESSION_COOKIE_DOMAIN,\n]\nSECURE_PROXY_SSL_HEADER = (\"HTTP_X_FORWARDED_PROTO\", \"https\")\n\n# Set the allowed hosts to the cookie domain\nALLOWED_HOSTS = [SESSION_COOKIE_DOMAIN, \"web\"]\n\n# Security options\nSECURE_HSTS_SECONDS = int(os.environ.get(\"SECURE_HSTS_SECONDS\", \"0\"))\nSECURE_HSTS_INCLUDE_SUBDOMAINS = strtobool(\n os.environ.get(\"SECURE_HSTS_INCLUDE_SUBDOMAINS\", \"False\")\n)\nSECURE_HSTS_PRELOAD = strtobool(os.environ.get(\"SECURE_HSTS_PRELOAD\", \"True\"))\nSECURE_CONTENT_TYPE_NOSNIFF = strtobool(\n os.environ.get(\"SECURE_CONTENT_TYPE_NOSNIFF\", \"False\")\n)\nSECURE_BROWSER_XSS_FILTER = strtobool(\n os.environ.get(\"SECURE_BROWSER_XSS_FILTER\", \"False\")\n)\nX_FRAME_OPTIONS = os.environ.get(\"X_FRAME_OPTIONS\", \"DENY\")\n# \"strict-origin-when-cross-origin\" required for uploads for cross domain POSTs\nSECURE_REFERRER_POLICY = os.environ.get(\n \"SECURE_REFERRER_POLICY\", \"strict-origin-when-cross-origin\"\n)\n\nPERMISSIONS_POLICY = {\n \"accelerometer\": [],\n \"ambient-light-sensor\": [],\n \"autoplay\": [],\n \"camera\": [],\n \"display-capture\": [],\n \"document-domain\": [],\n \"encrypted-media\": [],\n \"fullscreen\": [],\n \"geolocation\": [],\n \"gyroscope\": [],\n \"interest-cohort\": [],\n \"magnetometer\": [],\n \"microphone\": [],\n \"midi\": [],\n \"payment\": [],\n \"usb\": [],\n}\n\nIPWARE_META_PRECEDENCE_ORDER = (\n # Set by nginx\n \"HTTP_X_FORWARDED_FOR\",\n \"HTTP_X_REAL_IP\",\n)\n\n# Absolute path to the directory static files should be collected to.\n# Don't put anything in this directory yourself; store your static files\n# in apps' \"static/\" subdirectories and in STATICFILES_DIRS.\n# Example: \"/home/media/media.lawrence.com/static/\"\nSTATIC_ROOT = \"/static/\"\n\nSTATIC_HOST = os.environ.get(\"DJANGO_STATIC_HOST\", \"\")\nSTATIC_URL = f\"{STATIC_HOST}/static/\"\n\n# List of finder classes that know how to find static files in\n# various locations.\nSTATICFILES_FINDERS = (\n \"django.contrib.staticfiles.finders.FileSystemFinder\",\n \"django.contrib.staticfiles.finders.AppDirectoriesFinder\",\n)\n\n# Vendored static files will be put here\nSTATICFILES_DIRS = [\"/opt/static/\", MACHINA_MAIN_STATIC_DIR]\n\nSTATICFILES_STORAGE = \"whitenoise.storage.CompressedManifestStaticFilesStorage\"\n\n# Make this unique, and don't share it with anybody.\nSECRET_KEY = os.environ.get(\n \"SECRET_KEY\", \"d=%^l=xa02an9jn-$!*hy1)5yox$a-$2(ejt-2smimh=j4%8*b\"\n)\n\nTEMPLATES = [\n {\n \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n \"DIRS\": [\n # Override the machina templates, everything else is found with\n # django.template.loaders.app_directories.Loader\n os.path.join(SITE_ROOT, \"grandchallenge/forums/templates/\"),\n MACHINA_MAIN_TEMPLATE_DIR,\n ],\n \"OPTIONS\": {\n \"context_processors\": [\n \"django.contrib.auth.context_processors.auth\",\n \"django.template.context_processors.debug\",\n \"django.template.context_processors.i18n\",\n \"django.template.context_processors.media\",\n \"django.template.context_processors.static\",\n \"django.template.context_processors.tz\",\n \"django.template.context_processors.request\",\n \"django.contrib.messages.context_processors.messages\",\n \"grandchallenge.core.context_processors.challenge\",\n \"grandchallenge.core.context_processors.deployment_info\",\n 
\"grandchallenge.core.context_processors.debug\",\n \"grandchallenge.core.context_processors.sentry_dsn\",\n \"grandchallenge.core.context_processors.footer_links\",\n \"grandchallenge.core.context_processors.help_forum\",\n \"grandchallenge.core.context_processors.about_page\",\n \"machina.core.context_processors.metadata\",\n ],\n \"loaders\": [\n \"django.template.loaders.filesystem.Loader\",\n \"django.template.loaders.app_directories.Loader\",\n ],\n },\n }\n]\n\nMIDDLEWARE = (\n \"django.middleware.security.SecurityMiddleware\", # Keep security at top\n \"whitenoise.middleware.WhiteNoiseMiddleware\",\n # Keep whitenoise after security and before all else\n \"aws_xray_sdk.ext.django.middleware.XRayMiddleware\", # xray near the top\n \"corsheaders.middleware.CorsMiddleware\", # Keep CORS near the top\n \"django.middleware.common.BrokenLinkEmailsMiddleware\",\n # Keep BrokenLinkEmailsMiddleware near the top\n \"django_permissions_policy.PermissionsPolicyMiddleware\",\n \"django.contrib.sessions.middleware.SessionMiddleware\",\n \"django.middleware.common.CommonMiddleware\",\n \"django.middleware.csrf.CsrfViewMiddleware\",\n \"django.contrib.auth.middleware.AuthenticationMiddleware\",\n \"django.contrib.messages.middleware.MessageMiddleware\",\n \"django.contrib.sites.middleware.CurrentSiteMiddleware\",\n \"django.middleware.clickjacking.XFrameOptionsMiddleware\",\n \"simple_history.middleware.HistoryRequestMiddleware\",\n # subdomain_middleware after CurrentSiteMiddleware\n \"grandchallenge.subdomains.middleware.subdomain_middleware\",\n \"grandchallenge.subdomains.middleware.challenge_subdomain_middleware\",\n \"grandchallenge.subdomains.middleware.subdomain_urlconf_middleware\",\n \"grandchallenge.timezones.middleware.TimezoneMiddleware\",\n \"machina.apps.forum_permission.middleware.ForumPermissionMiddleware\",\n # Flatpage fallback almost last\n \"django.contrib.flatpages.middleware.FlatpageFallbackMiddleware\",\n # Redirects last as they're a last resort\n \"django.contrib.redirects.middleware.RedirectFallbackMiddleware\",\n)\n\n# Python dotted path to the WSGI application used by Django's runserver.\nWSGI_APPLICATION = \"config.wsgi.application\"\n\nDJANGO_APPS = [\n \"django.contrib.auth\",\n \"django.contrib.contenttypes\",\n \"django.contrib.sessions\",\n \"django.contrib.sites\",\n \"django.contrib.messages\",\n \"whitenoise.runserver_nostatic\", # Keep whitenoise above staticfiles\n \"django.contrib.staticfiles\",\n \"django.contrib.humanize\",\n \"django.contrib.admin\",\n \"django.contrib.postgres\",\n \"django.contrib.flatpages\",\n \"django.contrib.sitemaps\",\n \"django.contrib.redirects\",\n]\n\nTHIRD_PARTY_APPS = [\n \"aws_xray_sdk.ext.django\", # tracing\n \"django_celery_results\", # database results backend\n \"django_celery_beat\", # periodic tasks\n \"djcelery_email\", # asynchronous emails\n \"guardian\", # per object permissions\n \"rest_framework\", # provides REST API\n \"knox\", # token auth for REST API\n \"crispy_forms\", # bootstrap forms\n \"django_select2\", # for multiple choice widgets\n \"django_summernote\", # for WYSIWYG page editing\n \"dal\", # for autocompletion of selection fields\n \"dal_select2\", # for autocompletion of selection fields\n \"django_extensions\", # custom extensions\n \"simple_history\", # for object history\n \"corsheaders\", # to allow api communication from subdomains\n \"markdownx\", # for editing markdown\n \"stdimage\",\n \"django_filters\",\n \"drf_spectacular\",\n \"allauth\",\n \"allauth.account\",\n 
\"allauth.socialaccount\",\n \"grandchallenge.profiles.providers.gmail\",\n # Notifications with overrides\n \"actstream\",\n \"grandchallenge.notifications\",\n # django-machina dependencies:\n \"mptt\",\n \"haystack\",\n \"widget_tweaks\",\n # djano-machina apps:\n \"machina\",\n \"machina.apps.forum\",\n \"machina.apps.forum_conversation.forum_attachments\",\n \"machina.apps.forum_conversation.forum_polls\",\n \"machina.apps.forum_feeds\",\n \"machina.apps.forum_moderation\",\n \"machina.apps.forum_search\",\n \"machina.apps.forum_tracking\",\n \"machina.apps.forum_permission\",\n # Overridden apps\n \"grandchallenge.forum_conversation\",\n \"grandchallenge.forum_member\",\n]\n\nLOCAL_APPS = [\n \"grandchallenge.admins\",\n \"grandchallenge.anatomy\",\n \"grandchallenge.api\",\n \"grandchallenge.api_tokens\",\n \"grandchallenge.challenges\",\n \"grandchallenge.core\",\n \"grandchallenge.evaluation\",\n \"grandchallenge.jqfileupload\",\n \"grandchallenge.pages\",\n \"grandchallenge.participants\",\n \"grandchallenge.profiles\",\n \"grandchallenge.teams\",\n \"grandchallenge.uploads\",\n \"grandchallenge.cases\",\n \"grandchallenge.algorithms\",\n \"grandchallenge.components\",\n \"grandchallenge.statistics\",\n \"grandchallenge.archives\",\n \"grandchallenge.patients\",\n \"grandchallenge.studies\",\n \"grandchallenge.registrations\",\n \"grandchallenge.annotations\",\n \"grandchallenge.retina_core\",\n \"grandchallenge.retina_api\",\n \"grandchallenge.workstations\",\n \"grandchallenge.workspaces\",\n \"grandchallenge.reader_studies\",\n \"grandchallenge.workstation_configs\",\n \"grandchallenge.policies\",\n \"grandchallenge.products\",\n \"grandchallenge.serving\",\n \"grandchallenge.blogs\",\n \"grandchallenge.publications\",\n \"grandchallenge.verifications\",\n \"grandchallenge.credits\",\n \"grandchallenge.task_categories\",\n \"grandchallenge.modalities\",\n \"grandchallenge.datatables\",\n \"grandchallenge.organizations\",\n \"grandchallenge.groups\",\n \"grandchallenge.github\",\n \"grandchallenge.codebuild\",\n \"grandchallenge.timezones\",\n \"grandchallenge.documentation\",\n \"grandchallenge.flatpages\",\n]\n\nINSTALLED_APPS = DJANGO_APPS + LOCAL_APPS + THIRD_PARTY_APPS\n\nADMIN_URL = f'{os.environ.get(\"DJANGO_ADMIN_URL\", \"django-admin\")}/'\n\nAUTHENTICATION_BACKENDS = [\n \"django.contrib.auth.backends.ModelBackend\",\n \"allauth.account.auth_backends.AuthenticationBackend\",\n \"guardian.backends.ObjectPermissionBackend\",\n]\n\nGOOGLE_ANALYTICS_ID = os.environ.get(\"GOOGLE_ANALYTICS_ID\", \"GA_TRACKING_ID\")\n\n##############################################################################\n#\n# django-allauth\n#\n##############################################################################\n\nACCOUNT_ADAPTER = \"grandchallenge.profiles.adapters.AccountAdapter\"\nACCOUNT_SIGNUP_FORM_CLASS = \"grandchallenge.profiles.forms.SignupForm\"\n\nACCOUNT_AUTHENTICATION_METHOD = \"username_email\"\nACCOUNT_EMAIL_REQUIRED = True\nACCOUNT_EMAIL_VERIFICATION = \"mandatory\"\nACCOUNT_USERNAME_MIN_LENGTH = 4\nACCOUNT_DEFAULT_HTTP_PROTOCOL = \"https\"\nACCOUNT_LOGIN_ON_EMAIL_CONFIRMATION = True\nACCOUNT_USERNAME_BLACKLIST = USERNAME_DENYLIST\n\nSOCIALACCOUNT_ADAPTER = \"grandchallenge.profiles.adapters.SocialAccountAdapter\"\nSOCIALACCOUNT_AUTO_SIGNUP = False\nSOCIALACCOUNT_STORE_TOKENS = False\nSOCIALACCOUNT_PROVIDERS = {\n \"gmail\": {\n \"APP\": {\n \"client_id\": os.environ.get(\"SOCIAL_AUTH_GOOGLE_OAUTH2_KEY\", \"\"),\n \"secret\": 
os.environ.get(\"SOCIAL_AUTH_GOOGLE_OAUTH2_SECRET\", \"\"),\n }\n }\n}\n\n# Use full paths as view name lookups do not work on subdomains\nLOGIN_URL = \"/accounts/login/\"\nLOGOUT_URL = \"/accounts/logout/\"\nLOGIN_REDIRECT_URL = \"/users/profile/\"\n\n##############################################################################\n#\n# stdimage\n#\n##############################################################################\n\n# Re-render the existing images if these values change\n# https://github.com/codingjoe/django-stdimage#re-rendering-variations\nSTDIMAGE_LOGO_VARIATIONS = {\n # Must be square\n \"full\": (None, None, False),\n \"x20\": (640, 640, True),\n \"x15\": (480, 480, True),\n \"x10\": (320, 320, True),\n \"x02\": (64, 64, True),\n}\nSTDIMAGE_SOCIAL_VARIATIONS = {\n # Values from social sharing\n \"full\": (None, None, False),\n \"x20\": (1280, 640, False),\n \"x15\": (960, 480, False),\n \"x10\": (640, 320, False),\n}\nSTDIMAGE_BANNER_VARIATIONS = {\n # Fixed width, any height\n \"full\": (None, None, False),\n \"x20\": (2220, None, False),\n \"x15\": (1665, None, False),\n \"x10\": (1110, None, False),\n}\n\n##############################################################################\n#\n# actstream\n#\n##############################################################################\n\nACTSTREAM_ENABLE = strtobool(os.environ.get(\"ACTSTREAM_ENABLE\", \"True\"))\nACTSTREAM_SETTINGS = {\n \"MANAGER\": \"actstream.managers.ActionManager\",\n \"FETCH_RELATIONS\": True,\n \"USE_JSONFIELD\": True,\n}\n\n##############################################################################\n#\n# django-summernote\n#\n##############################################################################\n\n# WYSIWYG editing with Summernote\nSUMMERNOTE_THEME = \"bs4\"\nSUMMERNOTE_CONFIG = {\n \"attachment_model\": \"uploads.SummernoteAttachment\",\n \"attachment_require_authentication\": True,\n \"summernote\": {\n \"width\": \"100%\",\n \"toolbar\": [\n [\"style\", [\"style\"]],\n [\n \"font\",\n [\"bold\", \"italic\", \"underline\", \"strikethrough\", \"clear\"],\n ],\n [\"para\", [\"ul\", \"ol\", \"paragraph\"]],\n [\"insert\", [\"link\", \"picture\", \"hr\"]],\n [\"view\", [\"fullscreen\", \"codeview\"]],\n [\"help\", [\"help\"]],\n ],\n },\n}\n\n# Settings for allowed HTML\nBLEACH_ALLOWED_TAGS = [\n \"a\",\n \"abbr\",\n \"acronym\",\n \"b\",\n \"blockquote\",\n \"br\",\n \"code\",\n \"col\",\n \"div\",\n \"em\",\n \"h1\",\n \"h2\",\n \"h3\",\n \"h4\",\n \"h5\",\n \"h6\",\n \"hr\",\n \"i\",\n \"img\",\n \"li\",\n \"ol\",\n \"p\",\n \"pre\",\n \"span\",\n \"strike\",\n \"strong\",\n \"table\",\n \"tbody\",\n \"thead\",\n \"td\",\n \"th\",\n \"tr\",\n \"u\",\n \"ul\",\n \"video\",\n]\nBLEACH_ALLOWED_ATTRIBUTES = {\n \"*\": [\"class\", \"data-toggle\", \"id\", \"style\", \"role\"],\n \"a\": [\"href\", \"title\", \"target\", \"rel\"],\n \"abbr\": [\"title\"],\n \"acronym\": [\"title\"],\n \"img\": [\"height\", \"src\", \"width\"],\n # For bootstrap tables: https://getbootstrap.com/docs/4.3/content/tables/\n \"th\": [\"scope\", \"colspan\"],\n \"td\": [\"colspan\"],\n \"video\": [\"src\", \"loop\", \"controls\", \"poster\"],\n}\nBLEACH_ALLOWED_STYLES = [\"height\", \"margin-left\", \"text-align\", \"width\"]\nBLEACH_ALLOWED_PROTOCOLS = [\"http\", \"https\", \"mailto\"]\nBLEACH_STRIP = strtobool(os.environ.get(\"BLEACH_STRIP\", \"True\"))\n\n# The markdown processor\nMARKDOWNX_MEDIA_PATH = datetime.now().strftime(\"i/%Y/%m/%d/\")\nMARKDOWNX_MARKDOWN_EXTENSIONS = [\n 
\"markdown.extensions.fenced_code\",\n \"markdown.extensions.tables\",\n \"markdown.extensions.sane_lists\",\n \"markdown.extensions.codehilite\",\n BS4Extension(),\n]\nMARKDOWNX_MARKDOWNIFY_FUNCTION = (\n \"grandchallenge.core.templatetags.bleach.md2html\"\n)\nMARKDOWNX_MARKDOWN_EXTENSION_CONFIGS = {}\nMARKDOWNX_IMAGE_MAX_SIZE = {\"size\": (2000, 0), \"quality\": 90}\n\nHAYSTACK_CONNECTIONS = {\n \"default\": {\"ENGINE\": \"haystack.backends.simple_backend.SimpleEngine\"},\n}\n\nFORUMS_CHALLENGE_CATEGORY_NAME = \"Challenges\"\nMACHINA_BASE_TEMPLATE_NAME = \"base.html\"\nMACHINA_PROFILE_AVATARS_ENABLED = False\nMACHINA_FORUM_NAME = \"Grand Challenge Forums\"\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n \"NAME\": \"django.contrib.auth.password_validation.UserAttributeSimilarityValidator\"\n },\n {\"NAME\": \"django.contrib.auth.password_validation.MinimumLengthValidator\"},\n {\n \"NAME\": \"django.contrib.auth.password_validation.CommonPasswordValidator\"\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.NumericPasswordValidator\"\n },\n]\n\n# A sample logging configuration. More info in configuration can be found at\n# https://docs.djangoproject.com/en/dev/topics/logging/ .\nLOGGING = {\n \"version\": 1,\n \"disable_existing_loggers\": False,\n \"handlers\": {\"console\": {\"class\": \"logging.StreamHandler\"}},\n \"loggers\": {\n \"grandchallenge\": {\n \"level\": os.environ.get(\"GRAND_CHALLENGE_LOG_LEVEL\", \"INFO\"),\n \"handlers\": [\"console\"],\n \"propagate\": True,\n },\n \"django\": {\n \"level\": os.environ.get(\"DJANGO_LOG_LEVEL\", \"INFO\"),\n \"handlers\": [\"console\"],\n \"propagate\": True,\n },\n \"werkzeug\": {\n \"handlers\": [\"console\"],\n \"level\": \"DEBUG\",\n \"propagate\": True,\n },\n # As AWS_XRAY_CONTEXT_MISSING can only be set to LOG_ERROR,\n # silence errors from this sdk as they flood the logs in\n # RedirectFallbackMiddleware\n \"aws_xray_sdk\": {\n \"handlers\": [\"console\"],\n \"level\": \"CRITICAL\",\n \"propagate\": True,\n },\n },\n}\n\n###############################################################################\n# SENTRY\n###############################################################################\n\nSENTRY_DSN = os.environ.get(\"DJANGO_SENTRY_DSN\", \"\")\nSENTRY_ENABLE_JS_REPORTING = strtobool(\n os.environ.get(\"SENTRY_ENABLE_JS_REPORTING\", \"False\")\n)\nWORKSTATION_SENTRY_DSN = os.environ.get(\"WORKSTATION_SENTRY_DSN\", \"\")\n\nif SENTRY_DSN:\n sentry_sdk.init(\n dsn=SENTRY_DSN,\n integrations=[DjangoIntegration(), CeleryIntegration()],\n release=COMMIT_ID,\n traces_sample_rate=float(\n os.environ.get(\"SENTRY_TRACES_SAMPLE_RATE\", \"0.0\")\n ),\n ignore_errors=[PriorStepFailed, ImageImportError],\n )\n ignore_logger(\"django.security.DisallowedHost\")\n ignore_logger(\"aws_xray_sdk\")\n\n###############################################################################\n# XRAY\n###############################################################################\nXRAY_RECORDER = {\n \"AWS_XRAY_CONTEXT_MISSING\": \"LOG_ERROR\",\n \"PLUGINS\": (\"ECSPlugin\",),\n \"AWS_XRAY_TRACING_NAME\": SESSION_COOKIE_DOMAIN.lstrip(\".\"),\n}\n\n###############################################################################\n#\n# django-rest-framework and drf-spectacular\n#\n###############################################################################\n\nREST_FRAMEWORK = {\n \"DEFAULT_PERMISSION_CLASSES\": (\"rest_framework.permissions.IsAdminUser\",),\n \"DEFAULT_AUTHENTICATION_CLASSES\": (\n \"knox.auth.TokenAuthentication\",\n 
\"rest_framework.authentication.SessionAuthentication\",\n ),\n \"DEFAULT_RENDERER_CLASSES\": [\"rest_framework.renderers.JSONRenderer\"],\n \"DEFAULT_PAGINATION_CLASS\": \"grandchallenge.api.pagination.MaxLimit1000OffsetPagination\",\n \"PAGE_SIZE\": 100,\n \"UNAUTHENTICATED_USER\": \"guardian.utils.get_anonymous_user\",\n \"DEFAULT_SCHEMA_CLASS\": \"drf_spectacular.openapi.AutoSchema\",\n}\n\nSPECTACULAR_SETTINGS = {\n \"SCHEMA_PATH_PREFIX\": r\"/api/v[0-9]\",\n \"TITLE\": f\"{SESSION_COOKIE_DOMAIN.lstrip('.')} API\",\n \"DESCRIPTION\": f\"The API for {SESSION_COOKIE_DOMAIN.lstrip('.')}.\",\n \"TOS\": f\"https://{SESSION_COOKIE_DOMAIN.lstrip('.')}/policies/terms-of-service/\",\n \"LICENSE\": {\"name\": \"Apache License 2.0\"},\n \"VERSION\": \"1.0.0\",\n}\n\nREST_KNOX = {\n \"AUTH_HEADER_PREFIX\": \"Bearer\",\n}\n\n###############################################################################\n#\n# CORS\n#\n###############################################################################\n\nVALID_SUBDOMAIN_REGEX = r\"[A-Za-z0-9](?:[A-Za-z0-9\\-]{0,61}[A-Za-z0-9])?\"\nCORS_ORIGIN_REGEX_WHITELIST = [\n rf\"^https:\\/\\/{VALID_SUBDOMAIN_REGEX}{re.escape(SESSION_COOKIE_DOMAIN)}$\",\n rf\"^https:\\/\\/{VALID_SUBDOMAIN_REGEX}.static.observableusercontent.com$\",\n]\n# SESSION_COOKIE_SAMESITE should be set to \"lax\" so won't send credentials\n# across domains, but this will allow workstations to access the api\nCORS_ALLOW_CREDENTIALS = True\n\n###############################################################################\n#\n# celery\n#\n###############################################################################\n\nCELERY_TASK_DECORATOR_KWARGS = {\n \"acks-late-2xlarge\": {\n # For idempotent tasks that take a long time (<7200s)\n # or require a large amount of memory\n \"acks_late\": True,\n \"reject_on_worker_lost\": True,\n \"queue\": \"acks-late-2xlarge\",\n },\n \"acks-late-micro-short\": {\n # For idempotent tasks that take a short time (<300s)\n # and do not require a large amount of memory\n \"acks_late\": True,\n \"reject_on_worker_lost\": True,\n \"queue\": \"acks-late-micro-short\",\n },\n}\n\nCELERY_RESULT_BACKEND = os.environ.get(\"CELERY_RESULT_BACKEND\", \"django-db\")\nCELERY_RESULT_PERSISTENT = True\nCELERY_TASK_ACKS_LATE = strtobool(\n os.environ.get(\"CELERY_TASK_ACKS_LATE\", \"False\")\n)\nCELERY_WORKER_PREFETCH_MULTIPLIER = int(\n os.environ.get(\"CELERY_WORKER_PREFETCH_MULTIPLIER\", \"1\")\n)\nCELERY_TASK_SOFT_TIME_LIMIT = int(\n os.environ.get(\"CELERY_TASK_SOFT_TIME_LIMIT\", \"7200\")\n)\nCELERY_TASK_TIME_LIMIT = int(os.environ.get(\"CELERY_TASK_TIME_LIMIT\", \"7260\"))\nCELERY_BROKER_TRANSPORT_OPTIONS = {\n \"visibility_timeout\": int(1.1 * CELERY_TASK_TIME_LIMIT)\n}\nCELERY_BROKER_CONNECTION_MAX_RETRIES = 0\n\nif os.environ.get(\"BROKER_TYPE\", \"\").lower() == \"sqs\":\n CELERY_BROKER_URL = \"sqs://\"\n\n CELERY_WORKER_ENABLE_REMOTE_CONTROL = False\n CELERY_BROKER_USE_SSL = True\n\n CELERY_BROKER_TRANSPORT_OPTIONS.update(\n {\n \"queue_name_prefix\": os.environ.get(\n \"CELERY_BROKER_QUEUE_NAME_PREFIX\", \"gclocalhost-\"\n ),\n \"region\": os.environ.get(\n \"CELERY_BROKER_REGION\", AWS_DEFAULT_REGION\n ),\n \"polling_interval\": int(\n os.environ.get(\"CELERY_BROKER_POLLING_INTERVAL\", \"1\")\n ),\n }\n )\nelse:\n CELERY_BROKER_URL = os.environ.get(\n \"BROKER_URL\", f\"redis://{REDIS_HOSTNAME}:6379/0\"\n )\n\n# Keep results of sent emails\nCELERY_EMAIL_CHUNK_SIZE = 1\nCELERY_EMAIL_TASK_CONFIG = {\n \"ignore_result\": False,\n}\n\nCOMPONENTS_DEFAULT_BACKEND = 
os.environ.get(\n \"COMPONENTS_DEFAULT_BACKEND\",\n \"grandchallenge.components.backends.amazon_ecs.AmazonECSExecutor\",\n)\nCOMPONENTS_REGISTRY_URL = os.environ.get(\n \"COMPONENTS_REGISTRY_URL\", \"registry:5000\"\n)\nCOMPONENTS_REGISTRY_PREFIX = os.environ.get(\n \"COMPONENTS_REGISTRY_PREFIX\", SESSION_COOKIE_DOMAIN.lstrip(\".\")\n)\nCOMPONENTS_REGISTRY_INSECURE = strtobool(\n os.environ.get(\"COMPONENTS_REGISTRY_INSECURE\", \"False\")\n)\nCOMPONENTS_MAXIMUM_IMAGE_SIZE = 10_737_418_240 # 10 gb\nCOMPONENTS_AMAZON_ECS_NFS_MOUNT_POINT = os.environ.get(\n \"COMPONENTS_AMAZON_ECS_NFS_MOUNT_POINT\", \"/mnt/aws-batch-nfs/\"\n)\nCOMPONENTS_AMAZON_ECS_LOG_GROUP_NAME = os.environ.get(\n \"COMPONENTS_AMAZON_ECS_LOG_GROUP_NAME\", \"\"\n)\nCOMPONENTS_AMAZON_ECS_LOGS_REGION = os.environ.get(\n \"COMPONENTS_AMAZON_ECS_LOGS_REGION\", AWS_DEFAULT_REGION\n)\nCOMPONENTS_AMAZON_ECS_CPU_CLUSTER_ARN = os.environ.get(\n \"COMPONENTS_AMAZON_ECS_CPU_CLUSTER_ARN\", \"\"\n)\nCOMPONENTS_AMAZON_ECS_GPU_CLUSTER_ARN = os.environ.get(\n \"COMPONENTS_AMAZON_ECS_GPU_CLUSTER_ARN\", \"\"\n)\nCOMPONENTS_AMAZON_ECS_TASK_ROLE_ARN = os.environ.get(\n \"COMPONENTS_AMAZON_ECS_TASK_ROLE_ARN\", \"\"\n)\nCOMPONENTS_DOCKER_BASE_URL = os.environ.get(\n \"COMPONENTS_DOCKER_BASE_URL\", \"unix://var/run/docker.sock\"\n)\nCOMPONENTS_DOCKER_TLSVERIFY = strtobool(\n os.environ.get(\"COMPONENTS_DOCKER_TLSVERIFY\", \"False\")\n)\nCOMPONENTS_DOCKER_TLSCACERT = os.environ.get(\"COMPONENTS_DOCKER_TLSCACERT\", \"\")\nCOMPONENTS_DOCKER_TLSCERT = os.environ.get(\"COMPONENTS_DOCKER_TLSCERT\", \"\")\nCOMPONENTS_DOCKER_TLSKEY = os.environ.get(\"COMPONENTS_DOCKER_TLSKEY\", \"\")\nCOMPONENTS_MEMORY_LIMIT = int(os.environ.get(\"COMPONENTS_MEMORY_LIMIT\", \"4\"))\nCOMPONENTS_IO_IMAGE = \"alpine:3.14\"\nCOMPONENTS_CPU_QUOTA = int(os.environ.get(\"COMPONENTS_CPU_QUOTA\", \"100000\"))\nCOMPONENTS_CPU_PERIOD = int(os.environ.get(\"COMPONENTS_CPU_PERIOD\", \"100000\"))\nCOMPONENTS_PIDS_LIMIT = int(os.environ.get(\"COMPONENTS_PIDS_LIMIT\", \"128\"))\nCOMPONENTS_CPU_SHARES = int(\n os.environ.get(\"COMPONENTS_CPU_SHARES\", \"1024\") # Default weight\n)\nCOMPONENTS_CPUSET_CPUS = str(os.environ.get(\"COMPONENTS_CPUSET_CPUS\", \"\"))\nCOMPONENTS_DOCKER_RUNTIME = os.environ.get(\"COMPONENTS_DOCKER_RUNTIME\", None)\nCOMPONENTS_NVIDIA_VISIBLE_DEVICES = os.environ.get(\n \"COMPONENTS_NVIDIA_VISIBLE_DEVICES\", \"void\"\n)\n\n# Set which template pack to use for forms\nCRISPY_TEMPLATE_PACK = \"bootstrap4\"\n\n# When using bootstrap error messages need to be renamed to danger\nMESSAGE_TAGS = {messages.ERROR: \"danger\"}\n\n# The name of the group whose members will be able to create reader studies\nREADER_STUDY_CREATORS_GROUP_NAME = \"reader_study_creators\"\n\n###############################################################################\n#\n# workspaces\n#\n###############################################################################\n\nWORKBENCH_SECRET_KEY = os.environ.get(\"WORKBENCH_SECRET_KEY\")\nWORKBENCH_API_URL = os.environ.get(\"WORKBENCH_API_URL\")\nWORKBENCH_ADMIN_USERNAME = os.environ.get(\"WORKBENCH_ADMIN_USERNAME\", \"demo\")\n\n###############################################################################\n#\n# workstations\n#\n###############################################################################\n\n# The workstation that is accessible by all authorised users\nDEFAULT_WORKSTATION_SLUG = os.environ.get(\n \"DEFAULT_WORKSTATION_SLUG\", \"cirrus-core\"\n)\nWORKSTATIONS_BASE_IMAGE_QUERY_PARAM = \"image\"\nWORKSTATIONS_OVERLAY_QUERY_PARAM = 
\"overlay\"\nWORKSTATIONS_READY_STUDY_QUERY_PARAM = \"readerStudy\"\nWORKSTATIONS_ALGORITHM_JOB_QUERY_PARAM = \"algorithmJob\"\nWORKSTATIONS_CONFIG_QUERY_PARAM = \"config\"\n# The name of the network that the workstations will be attached to\nWORKSTATIONS_NETWORK_NAME = os.environ.get(\n \"WORKSTATIONS_NETWORK_NAME\", \"grand-challengeorg_workstations\"\n)\n# The total limit on the number of sessions\nWORKSTATIONS_MAXIMUM_SESSIONS = int(\n os.environ.get(\"WORKSTATIONS_MAXIMUM_SESSIONS\", \"10\")\n)\n# The name of the group whose members will be able to create workstations\nWORKSTATIONS_CREATORS_GROUP_NAME = \"workstation_creators\"\nWORKSTATIONS_SESSION_DURATION_LIMIT = int(\n os.environ.get(\"WORKSTATIONS_SESSION_DURATION_LIMIT\", \"10000\")\n)\n# Which regions are available for workstations to run in\nWORKSTATIONS_ACTIVE_REGIONS = os.environ.get(\n \"WORKSTATIONS_ACTIVE_REGIONS\", AWS_DEFAULT_REGION\n).split(\",\")\nWORKSTATIONS_RENDERING_SUBDOMAINS = {\n # Possible AWS regions\n *[\n \"-\".join(z)\n for z in product(\n [\"us\", \"af\", \"ap\", \"ca\", \"cn\", \"eu\", \"me\", \"sa\"],\n [\n \"east\",\n \"west\",\n \"south\",\n \"north\",\n \"central\",\n \"northeast\",\n \"southeast\",\n \"northwest\",\n \"southwest\",\n ],\n [\"1\", \"2\", \"3\"],\n )\n ],\n # User defined regions\n \"eu-nl-1\",\n \"eu-nl-2\",\n}\n# Number of minutes grace period before the container is stopped\nWORKSTATIONS_GRACE_MINUTES = 5\n\nCELERY_BEAT_SCHEDULE = {\n \"push_metrics_to_cloudwatch\": {\n \"task\": \"grandchallenge.core.tasks.put_cloudwatch_metrics\",\n \"schedule\": timedelta(seconds=15),\n },\n \"ping_google\": {\n \"task\": \"grandchallenge.core.tasks.ping_google\",\n \"schedule\": timedelta(days=1),\n },\n \"update_publication_metadata\": {\n \"task\": \"grandchallenge.publications.tasks.update_publication_metadata\",\n \"schedule\": timedelta(days=1),\n },\n \"send_unread_notification_emails\": {\n \"task\": \"grandchallenge.notifications.tasks.send_unread_notification_emails\",\n \"schedule\": timedelta(days=1),\n },\n \"cleanup_stale_uploads\": {\n \"task\": \"grandchallenge.jqfileupload.tasks.cleanup_stale_uploads\",\n \"schedule\": timedelta(hours=1),\n },\n \"delete_old_user_uploads\": {\n \"task\": \"grandchallenge.uploads.tasks.delete_old_user_uploads\",\n \"schedule\": timedelta(hours=1),\n },\n \"clear_sessions\": {\n \"task\": \"grandchallenge.core.tasks.clear_sessions\",\n \"schedule\": timedelta(days=1),\n },\n \"update_challenge_results_cache\": {\n \"task\": \"grandchallenge.challenges.tasks.update_challenge_results_cache\",\n \"schedule\": timedelta(minutes=5),\n },\n \"validate_external_challenges\": {\n \"task\": \"grandchallenge.challenges.tasks.check_external_challenge_urls\",\n \"schedule\": timedelta(days=1),\n },\n **{\n f\"stop_expired_services_{region}\": {\n \"task\": \"grandchallenge.components.tasks.stop_expired_services\",\n \"kwargs\": {\n \"app_label\": \"workstations\",\n \"model_name\": \"session\",\n \"region\": region,\n },\n \"options\": {\"queue\": f\"workstations-{region}\"},\n \"schedule\": timedelta(minutes=WORKSTATIONS_GRACE_MINUTES),\n }\n for region in WORKSTATIONS_ACTIVE_REGIONS\n },\n}\n\n# The name of the group whose members will be able to create algorithms\nALGORITHMS_CREATORS_GROUP_NAME = \"algorithm_creators\"\n\n# Disallow some challenge names due to subdomain or media folder clashes\nDISALLOWED_CHALLENGE_NAMES = {\n \"m\",\n IMAGE_FILES_SUBDIRECTORY,\n \"logos\",\n \"banners\",\n \"mugshots\",\n \"docker\",\n EVALUATION_FILES_SUBDIRECTORY,\n 
\"evaluation-supplementary\",\n \"favicon\",\n \"i\",\n \"cache\",\n \"challenge\",\n \"challenges\",\n JQFILEUPLOAD_UPLOAD_SUBIDRECTORY,\n *USERNAME_DENYLIST,\n *WORKSTATIONS_RENDERING_SUBDOMAINS,\n}\n\n# Disallow registration from certain domains\nDISALLOWED_EMAIL_DOMAINS = {\n \"qq.com\",\n \"gm.uit.edu.vn\",\n \"wust.edu.cn\",\n *blocklist,\n}\n\n# GitHub App\nGITHUB_APP_INSTALL_URL = os.environ.get(\"GITHUB_APP_INSTALL_URL\", \"\")\nGITHUB_APP_ID = os.environ.get(\"GITHUB_APP_ID\", \"\")\nGITHUB_CLIENT_ID = os.environ.get(\"GITHUB_CLIENT_ID\", \"\")\nGITHUB_CLIENT_SECRET = os.environ.get(\"GITHUB_CLIENT_SECRET\", \"\")\nGITHUB_PRIVATE_KEY_BASE64 = os.environ.get(\"GITHUB_PRIVATE_KEY_BASE64\", \"\")\nGITHUB_WEBHOOK_SECRET = os.environ.get(\"GITHUB_WEBHOOK_SECRET\", \"\")\n\nCODEBUILD_PROJECT_NAME = os.environ.get(\"CODEBUILD_PROJECT_NAME\", \"\")\n\nOPEN_SOURCE_LICENSES = [\n \"Apache License 2.0\",\n \"MIT License\",\n \"GNU GPLv3\",\n \"GNU AGPLv3\",\n \"GNU GPLv3\",\n \"GNU LGPLv3\",\n \"Mozilla Public License 2.0\",\n \"Boost Software License 1.0\",\n \"The Unlicense\",\n]\n\n# Maximum file size in bytes to be opened by SimpleITK.ReadImage in cases.models.Image.get_sitk_image()\nMAX_SITK_FILE_SIZE = 268_435_456 # 256 mb\n\n# The maximum size of all the files in an upload session in bytes\nUPLOAD_SESSION_MAX_BYTES = 10_737_418_240 # 10 gb\n\n# Some forms have a lot of data, such as a reader study update view\n# that can contain reports about the medical images\nDATA_UPLOAD_MAX_MEMORY_SIZE = 16_777_216 # 16 mb\n\n# Some forms have a lot of fields, such as uploads of images\n# with many slices\nDATA_UPLOAD_MAX_NUMBER_FIELDS = int(\n os.environ.get(\"DATA_UPLOAD_MAX_NUMBER_FIELDS\", \"2048\")\n)\n\n# Default maximum width or height for thumbnails in retina workstation\nRETINA_DEFAULT_THUMBNAIL_SIZE = 128\n\n# Retina specific settings\nRETINA_GRADERS_GROUP_NAME = \"retina_graders\"\nRETINA_ADMINS_GROUP_NAME = \"retina_admins\"\n\nENABLE_DEBUG_TOOLBAR = False\n\nif DEBUG:\n EMAIL_BACKEND = \"django.core.mail.backends.console.EmailBackend\"\n\n # Allow localhost in development\n CORS_ORIGIN_REGEX_WHITELIST += [r\"^http://localhost:8888$\"]\n\n LOGGING[\"loggers\"][\"grandchallenge\"][\"level\"] = \"DEBUG\"\n\n PUBLIC_S3_STORAGE_KWARGS.update({\"secure_urls\": False})\n DEMO_ALGORITHM_IMAGE_PATH = os.path.join(SITE_ROOT, \"algorithm.tar.gz\")\n DEMO_ALGORITHM_SHA256 = \"sha256:5e81cef3738b7dbffc12c101990eb3b97f17642c09a2e0b64d5b3d4dd144e79b\"\n\n del CELERY_BEAT_SCHEDULE[\"push_metrics_to_cloudwatch\"]\n\n if ENABLE_DEBUG_TOOLBAR:\n INSTALLED_APPS += (\"debug_toolbar\",)\n\n MIDDLEWARE = (\n \"debug_toolbar.middleware.DebugToolbarMiddleware\",\n *MIDDLEWARE,\n )\n\n DEBUG_TOOLBAR_CONFIG = {\n \"SHOW_TOOLBAR_CALLBACK\": \"config.toolbar_callback\",\n \"RESULTS_CACHE_SIZE\": 100,\n }\n",
"path": "app/config/settings.py"
}
] | [
{
"content": "import os\nimport re\nfrom datetime import datetime, timedelta\nfrom distutils.util import strtobool as strtobool_i\nfrom itertools import product\n\nimport sentry_sdk\nfrom disposable_email_domains import blocklist\nfrom django.contrib.messages import constants as messages\nfrom django.urls import reverse\nfrom machina import MACHINA_MAIN_STATIC_DIR, MACHINA_MAIN_TEMPLATE_DIR\nfrom sentry_sdk.integrations.celery import CeleryIntegration\nfrom sentry_sdk.integrations.django import DjangoIntegration\nfrom sentry_sdk.integrations.logging import ignore_logger\n\nfrom config.denylist import USERNAME_DENYLIST\nfrom grandchallenge.algorithms.exceptions import ImageImportError\nfrom grandchallenge.components.exceptions import PriorStepFailed\nfrom grandchallenge.core.utils.markdown import BS4Extension\n\n\ndef strtobool(val) -> bool:\n \"\"\"Return disutils.util.strtobool as a boolean.\"\"\"\n return bool(strtobool_i(val))\n\n\nDEBUG = strtobool(os.environ.get(\"DEBUG\", \"False\"))\n\nCOMMIT_ID = os.environ.get(\"COMMIT_ID\", \"unknown\")\n\nADMINS = (\n # ('Your Name', '[email protected]'),\n)\n\n# Who gets the 404 notifications?\nmanager_email = os.environ.get(\"MANAGER_EMAIL\", None)\nif manager_email:\n MANAGERS = [(\"Manager\", manager_email)]\n\nIGNORABLE_404_URLS = [\n re.compile(r\".*\\.(php|cgi|asp).*\"),\n re.compile(r\"^/phpmyadmin.*\"),\n re.compile(r\"^/gen204.*\"),\n re.compile(r\"^/wp-content.*\"),\n re.compile(r\"^/wp.*\"),\n re.compile(r\"^/wordpress/.*\"),\n re.compile(r\"^/old/.*\", flags=re.IGNORECASE),\n re.compile(r\".*/trackback.*\"),\n re.compile(r\"^/site/.*\"),\n re.compile(r\"^/media/cache/.*\"),\n re.compile(r\"^/favicon.ico$\"),\n]\n\n# Used as starting points for various other paths. realpath(__file__) starts in\n# the config dir. We need to go one dir higher so path.join(\"..\")\nSITE_ROOT = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))\n\nDATABASES = {\n \"default\": {\n \"ENGINE\": \"django.db.backends.postgresql_psycopg2\",\n \"NAME\": os.environ.get(\"POSTGRES_DB\", \"grandchallenge\"),\n \"USER\": os.environ.get(\"POSTGRES_USER\", \"grandchallenge\"),\n \"PASSWORD\": os.environ.get(\"POSTGRES_PASSWORD\", \"secretpassword\"),\n \"HOST\": os.environ.get(\"POSTGRES_HOST\", \"postgres\"),\n \"PORT\": os.environ.get(\"POSTGRES_PORT\", \"\"),\n \"OPTIONS\": {\n \"sslmode\": os.environ.get(\"POSTGRES_SSL_MODE\", \"prefer\"),\n \"sslrootcert\": os.path.join(\n SITE_ROOT, \"config\", \"certs\", \"rds-ca-2019-root.pem\"\n ),\n },\n \"ATOMIC_REQUESTS\": strtobool(\n os.environ.get(\"ATOMIC_REQUESTS\", \"True\")\n ),\n }\n}\n\nEMAIL_BACKEND = \"djcelery_email.backends.CeleryEmailBackend\"\nCELERY_EMAIL_BACKEND = \"django_ses.SESBackend\"\nDEFAULT_FROM_EMAIL = os.environ.get(\n \"DEFAULT_FROM_EMAIL\", \"webmaster@localhost\"\n)\nSERVER_EMAIL = os.environ.get(\"SERVER_EMAIL\", \"root@localhost\")\n\nANONYMOUS_USER_NAME = \"AnonymousUser\"\nREGISTERED_USERS_GROUP_NAME = \"__registered_users_group__\"\nREGISTERED_AND_ANON_USERS_GROUP_NAME = \"__registered_and_anonymous_users__\"\n\n# Local time zone for this installation. Choices can be found here:\n# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name\n# although not all choices may be available on all operating systems.\n# On Unix systems, a value of None will cause Django to use the same\n# timezone as the operating system.\n# If running in a Windows environment this must be set to the same as your\n# system time zone.\nTIME_ZONE = \"UTC\"\n\n# Language code for this installation. 
All choices can be found here:\n# http://www.i18nguy.com/unicode/language-identifiers.html\nLANGUAGE_CODE = \"en-us\"\n\nSITE_ID = int(os.environ.get(\"SITE_ID\", \"1\"))\n\n# If you set this to False, Django will make some optimizations so as not\n# to load the internationalization machinery.\nUSE_I18N = True\n\n# If you set this to False, Django will not format dates, numbers and\n# calendars according to the current locale.\nUSE_L10N = True\n\n# If you set this to False, Django will not use timezone-aware datetimes.\nUSE_TZ = True\n\n# General forum\nDOCUMENTATION_HELP_FORUM_PK = os.environ.get(\n \"DOCUMENTATION_HELP_FORUM_PK\", \"1\"\n)\nDOCUMENTATION_HELP_FORUM_SLUG = os.environ.get(\n \"DOCUMENTATION_HELP_FORUM_SLUG\", \"general\"\n)\n\n# About Flatpage\nFLATPAGE_ABOUT_URL = os.environ.get(\"FLATPAGE_ABOUT_URL\", \"/about/\")\n\n##############################################################################\n#\n# Storage\n#\n##############################################################################\nDEFAULT_FILE_STORAGE = \"grandchallenge.core.storage.PublicS3Storage\"\n\n# Subdirectories on root for various files\nJQFILEUPLOAD_UPLOAD_SUBIDRECTORY = \"jqfileupload\"\nIMAGE_FILES_SUBDIRECTORY = \"images\"\nEVALUATION_FILES_SUBDIRECTORY = \"evaluation\"\nCOMPONENTS_FILES_SUBDIRECTORY = \"components\"\n\nAWS_S3_FILE_OVERWRITE = False\n# Note: deprecated in django storages 2.0\nAWS_BUCKET_ACL = \"private\"\nAWS_DEFAULT_ACL = \"private\"\nAWS_S3_MAX_MEMORY_SIZE = 1_048_576 # 100 MB\nAWS_S3_ENDPOINT_URL = os.environ.get(\"AWS_S3_ENDPOINT_URL\", None)\nAWS_DEFAULT_REGION = os.environ.get(\"AWS_DEFAULT_REGION\", \"eu-central-1\")\nAWS_SES_REGION_ENDPOINT = f\"email.{AWS_DEFAULT_REGION}.amazonaws.com\"\n\n# This is for storing files that should not be served to the public\nPRIVATE_S3_STORAGE_KWARGS = {\n \"bucket_name\": os.environ.get(\n \"PRIVATE_S3_STORAGE_BUCKET_NAME\", \"grand-challenge-private\"\n ),\n}\n\nPROTECTED_S3_STORAGE_KWARGS = {\n \"bucket_name\": os.environ.get(\n \"PROTECTED_S3_STORAGE_BUCKET_NAME\", \"grand-challenge-protected\"\n ),\n # This is the domain where people will be able to go to download data\n # from this bucket. 
Usually we would use reverse to find this out,\n # but this needs to be defined before the database is populated\n \"custom_domain\": os.environ.get(\n \"PROTECTED_S3_CUSTOM_DOMAIN\", \"gc.localhost/media\"\n ),\n}\nPROTECTED_S3_STORAGE_USE_CLOUDFRONT = strtobool(\n os.environ.get(\"PROTECTED_S3_STORAGE_USE_CLOUDFRONT\", \"False\")\n)\nPROTECTED_S3_STORAGE_CLOUDFRONT_DOMAIN = os.environ.get(\n \"PROTECTED_S3_STORAGE_CLOUDFRONT_DOMAIN_NAME\", \"\"\n)\n\nPUBLIC_S3_STORAGE_KWARGS = {\n \"bucket_name\": os.environ.get(\n \"PUBLIC_S3_STORAGE_BUCKET_NAME\", \"grand-challenge-public\"\n ),\n # Public bucket so do not use querystring_auth\n \"querystring_auth\": False,\n \"default_acl\": \"public-read\",\n}\n\nUPLOADS_S3_BUCKET_NAME = os.environ.get(\n \"UPLOADS_S3_BUCKET_NAME\", \"grand-challenge-uploads\"\n)\nUPLOADS_S3_USE_ACCELERATE_ENDPOINT = strtobool(\n os.environ.get(\"UPLOADS_S3_USE_ACCELERATE_ENDPOINT\", \"False\")\n)\nUPLOADS_MAX_SIZE_UNVERIFIED = int(\n os.environ.get(\"UPLOADS_MAX_SIZE_UNVERIFIED\", 2 * 1024 * 1024 * 1024)\n)\nUPLOADS_MAX_SIZE_VERIFIED = int(\n os.environ.get(\"UPLOADS_MAX_SIZE_VERIFIED\", 128 * 1024 * 1024 * 1024)\n)\n\n# Key pair used for signing CloudFront URLS, only used if\n# PROTECTED_S3_STORAGE_USE_CLOUDFRONT is True\nCLOUDFRONT_KEY_PAIR_ID = os.environ.get(\"CLOUDFRONT_KEY_PAIR_ID\", \"\")\nCLOUDFRONT_PRIVATE_KEY_BASE64 = os.environ.get(\n \"CLOUDFRONT_PRIVATE_KEY_BASE64\", \"\"\n)\nCLOUDFRONT_URL_EXPIRY_SECONDS = int(\n os.environ.get(\"CLOUDFRONT_URL_EXPIRY_SECONDS\", \"300\") # 5 mins\n)\n\n##############################################################################\n#\n# Caching\n#\n##############################################################################\nREDIS_HOSTNAME = os.environ.get(\"REDIS_HOSTNAME\", \"redis\")\n\nCACHES = {\n \"default\": {\n \"BACKEND\": \"django_redis.cache.RedisCache\",\n \"LOCATION\": f\"redis://{REDIS_HOSTNAME}:6379/1\",\n \"OPTIONS\": {\"CLIENT_CLASS\": \"django_redis.client.DefaultClient\"},\n },\n \"machina_attachments\": {\n \"BACKEND\": \"django.core.cache.backends.filebased.FileBasedCache\",\n \"LOCATION\": \"/tmp\",\n },\n}\n\nROOT_URLCONF = \"config.urls.root\"\nCHALLENGE_SUBDOMAIN_URL_CONF = \"config.urls.challenge_subdomain\"\nRENDERING_SUBDOMAIN_URL_CONF = \"config.urls.rendering_subdomain\"\nDEFAULT_SCHEME = os.environ.get(\"DEFAULT_SCHEME\", \"https\")\n\n# Workaround for https://github.com/ellmetha/django-machina/issues/219\nABSOLUTE_URL_OVERRIDES = {\n \"forum.forum\": lambda o: reverse(\n \"forum:forum\", kwargs={\"slug\": o.slug, \"pk\": o.pk},\n ),\n \"forum_conversation.topic\": lambda o: reverse(\n \"forum_conversation:topic\",\n kwargs={\n \"slug\": o.slug,\n \"pk\": o.pk,\n \"forum_slug\": o.forum.slug,\n \"forum_pk\": o.forum.pk,\n },\n ),\n}\n\nSESSION_COOKIE_DOMAIN = os.environ.get(\n \"SESSION_COOKIE_DOMAIN\", \".gc.localhost\"\n)\n# We're always running behind a proxy so set these to true\nSESSION_COOKIE_SECURE = True\nCSRF_COOKIE_SECURE = True\n# Trust all subdomains for CSRF, used for jqfileupload. 
Changed the name\n# of the CSRF token as existing ones are already in use.\nCSRF_COOKIE_DOMAIN = SESSION_COOKIE_DOMAIN\nCSRF_COOKIE_NAME = \"_csrftoken\"\nCSRF_TRUSTED_ORIGINS = [\n SESSION_COOKIE_DOMAIN,\n]\nSECURE_PROXY_SSL_HEADER = (\"HTTP_X_FORWARDED_PROTO\", \"https\")\n\n# Set the allowed hosts to the cookie domain\nALLOWED_HOSTS = [SESSION_COOKIE_DOMAIN, \"web\"]\n\n# Security options\nSECURE_HSTS_SECONDS = int(os.environ.get(\"SECURE_HSTS_SECONDS\", \"0\"))\nSECURE_HSTS_INCLUDE_SUBDOMAINS = strtobool(\n os.environ.get(\"SECURE_HSTS_INCLUDE_SUBDOMAINS\", \"False\")\n)\nSECURE_HSTS_PRELOAD = strtobool(os.environ.get(\"SECURE_HSTS_PRELOAD\", \"True\"))\nSECURE_CONTENT_TYPE_NOSNIFF = strtobool(\n os.environ.get(\"SECURE_CONTENT_TYPE_NOSNIFF\", \"False\")\n)\nSECURE_BROWSER_XSS_FILTER = strtobool(\n os.environ.get(\"SECURE_BROWSER_XSS_FILTER\", \"False\")\n)\nX_FRAME_OPTIONS = os.environ.get(\"X_FRAME_OPTIONS\", \"DENY\")\n# \"strict-origin-when-cross-origin\" required for uploads for cross domain POSTs\nSECURE_REFERRER_POLICY = os.environ.get(\n \"SECURE_REFERRER_POLICY\", \"strict-origin-when-cross-origin\"\n)\n\nPERMISSIONS_POLICY = {\n \"accelerometer\": [],\n \"ambient-light-sensor\": [],\n \"autoplay\": [],\n \"camera\": [],\n \"display-capture\": [],\n \"document-domain\": [],\n \"encrypted-media\": [],\n \"fullscreen\": [\"self\"],\n \"geolocation\": [],\n \"gyroscope\": [],\n \"interest-cohort\": [],\n \"magnetometer\": [],\n \"microphone\": [],\n \"midi\": [],\n \"payment\": [],\n \"usb\": [],\n}\n\nIPWARE_META_PRECEDENCE_ORDER = (\n # Set by nginx\n \"HTTP_X_FORWARDED_FOR\",\n \"HTTP_X_REAL_IP\",\n)\n\n# Absolute path to the directory static files should be collected to.\n# Don't put anything in this directory yourself; store your static files\n# in apps' \"static/\" subdirectories and in STATICFILES_DIRS.\n# Example: \"/home/media/media.lawrence.com/static/\"\nSTATIC_ROOT = \"/static/\"\n\nSTATIC_HOST = os.environ.get(\"DJANGO_STATIC_HOST\", \"\")\nSTATIC_URL = f\"{STATIC_HOST}/static/\"\n\n# List of finder classes that know how to find static files in\n# various locations.\nSTATICFILES_FINDERS = (\n \"django.contrib.staticfiles.finders.FileSystemFinder\",\n \"django.contrib.staticfiles.finders.AppDirectoriesFinder\",\n)\n\n# Vendored static files will be put here\nSTATICFILES_DIRS = [\"/opt/static/\", MACHINA_MAIN_STATIC_DIR]\n\nSTATICFILES_STORAGE = \"whitenoise.storage.CompressedManifestStaticFilesStorage\"\n\n# Make this unique, and don't share it with anybody.\nSECRET_KEY = os.environ.get(\n \"SECRET_KEY\", \"d=%^l=xa02an9jn-$!*hy1)5yox$a-$2(ejt-2smimh=j4%8*b\"\n)\n\nTEMPLATES = [\n {\n \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n \"DIRS\": [\n # Override the machina templates, everything else is found with\n # django.template.loaders.app_directories.Loader\n os.path.join(SITE_ROOT, \"grandchallenge/forums/templates/\"),\n MACHINA_MAIN_TEMPLATE_DIR,\n ],\n \"OPTIONS\": {\n \"context_processors\": [\n \"django.contrib.auth.context_processors.auth\",\n \"django.template.context_processors.debug\",\n \"django.template.context_processors.i18n\",\n \"django.template.context_processors.media\",\n \"django.template.context_processors.static\",\n \"django.template.context_processors.tz\",\n \"django.template.context_processors.request\",\n \"django.contrib.messages.context_processors.messages\",\n \"grandchallenge.core.context_processors.challenge\",\n \"grandchallenge.core.context_processors.deployment_info\",\n 
\"grandchallenge.core.context_processors.debug\",\n \"grandchallenge.core.context_processors.sentry_dsn\",\n \"grandchallenge.core.context_processors.footer_links\",\n \"grandchallenge.core.context_processors.help_forum\",\n \"grandchallenge.core.context_processors.about_page\",\n \"machina.core.context_processors.metadata\",\n ],\n \"loaders\": [\n \"django.template.loaders.filesystem.Loader\",\n \"django.template.loaders.app_directories.Loader\",\n ],\n },\n }\n]\n\nMIDDLEWARE = (\n \"django.middleware.security.SecurityMiddleware\", # Keep security at top\n \"whitenoise.middleware.WhiteNoiseMiddleware\",\n # Keep whitenoise after security and before all else\n \"aws_xray_sdk.ext.django.middleware.XRayMiddleware\", # xray near the top\n \"corsheaders.middleware.CorsMiddleware\", # Keep CORS near the top\n \"django.middleware.common.BrokenLinkEmailsMiddleware\",\n # Keep BrokenLinkEmailsMiddleware near the top\n \"django_permissions_policy.PermissionsPolicyMiddleware\",\n \"django.contrib.sessions.middleware.SessionMiddleware\",\n \"django.middleware.common.CommonMiddleware\",\n \"django.middleware.csrf.CsrfViewMiddleware\",\n \"django.contrib.auth.middleware.AuthenticationMiddleware\",\n \"django.contrib.messages.middleware.MessageMiddleware\",\n \"django.contrib.sites.middleware.CurrentSiteMiddleware\",\n \"django.middleware.clickjacking.XFrameOptionsMiddleware\",\n \"simple_history.middleware.HistoryRequestMiddleware\",\n # subdomain_middleware after CurrentSiteMiddleware\n \"grandchallenge.subdomains.middleware.subdomain_middleware\",\n \"grandchallenge.subdomains.middleware.challenge_subdomain_middleware\",\n \"grandchallenge.subdomains.middleware.subdomain_urlconf_middleware\",\n \"grandchallenge.timezones.middleware.TimezoneMiddleware\",\n \"machina.apps.forum_permission.middleware.ForumPermissionMiddleware\",\n # Flatpage fallback almost last\n \"django.contrib.flatpages.middleware.FlatpageFallbackMiddleware\",\n # Redirects last as they're a last resort\n \"django.contrib.redirects.middleware.RedirectFallbackMiddleware\",\n)\n\n# Python dotted path to the WSGI application used by Django's runserver.\nWSGI_APPLICATION = \"config.wsgi.application\"\n\nDJANGO_APPS = [\n \"django.contrib.auth\",\n \"django.contrib.contenttypes\",\n \"django.contrib.sessions\",\n \"django.contrib.sites\",\n \"django.contrib.messages\",\n \"whitenoise.runserver_nostatic\", # Keep whitenoise above staticfiles\n \"django.contrib.staticfiles\",\n \"django.contrib.humanize\",\n \"django.contrib.admin\",\n \"django.contrib.postgres\",\n \"django.contrib.flatpages\",\n \"django.contrib.sitemaps\",\n \"django.contrib.redirects\",\n]\n\nTHIRD_PARTY_APPS = [\n \"aws_xray_sdk.ext.django\", # tracing\n \"django_celery_results\", # database results backend\n \"django_celery_beat\", # periodic tasks\n \"djcelery_email\", # asynchronous emails\n \"guardian\", # per object permissions\n \"rest_framework\", # provides REST API\n \"knox\", # token auth for REST API\n \"crispy_forms\", # bootstrap forms\n \"django_select2\", # for multiple choice widgets\n \"django_summernote\", # for WYSIWYG page editing\n \"dal\", # for autocompletion of selection fields\n \"dal_select2\", # for autocompletion of selection fields\n \"django_extensions\", # custom extensions\n \"simple_history\", # for object history\n \"corsheaders\", # to allow api communication from subdomains\n \"markdownx\", # for editing markdown\n \"stdimage\",\n \"django_filters\",\n \"drf_spectacular\",\n \"allauth\",\n \"allauth.account\",\n 
\"allauth.socialaccount\",\n \"grandchallenge.profiles.providers.gmail\",\n # Notifications with overrides\n \"actstream\",\n \"grandchallenge.notifications\",\n # django-machina dependencies:\n \"mptt\",\n \"haystack\",\n \"widget_tweaks\",\n # djano-machina apps:\n \"machina\",\n \"machina.apps.forum\",\n \"machina.apps.forum_conversation.forum_attachments\",\n \"machina.apps.forum_conversation.forum_polls\",\n \"machina.apps.forum_feeds\",\n \"machina.apps.forum_moderation\",\n \"machina.apps.forum_search\",\n \"machina.apps.forum_tracking\",\n \"machina.apps.forum_permission\",\n # Overridden apps\n \"grandchallenge.forum_conversation\",\n \"grandchallenge.forum_member\",\n]\n\nLOCAL_APPS = [\n \"grandchallenge.admins\",\n \"grandchallenge.anatomy\",\n \"grandchallenge.api\",\n \"grandchallenge.api_tokens\",\n \"grandchallenge.challenges\",\n \"grandchallenge.core\",\n \"grandchallenge.evaluation\",\n \"grandchallenge.jqfileupload\",\n \"grandchallenge.pages\",\n \"grandchallenge.participants\",\n \"grandchallenge.profiles\",\n \"grandchallenge.teams\",\n \"grandchallenge.uploads\",\n \"grandchallenge.cases\",\n \"grandchallenge.algorithms\",\n \"grandchallenge.components\",\n \"grandchallenge.statistics\",\n \"grandchallenge.archives\",\n \"grandchallenge.patients\",\n \"grandchallenge.studies\",\n \"grandchallenge.registrations\",\n \"grandchallenge.annotations\",\n \"grandchallenge.retina_core\",\n \"grandchallenge.retina_api\",\n \"grandchallenge.workstations\",\n \"grandchallenge.workspaces\",\n \"grandchallenge.reader_studies\",\n \"grandchallenge.workstation_configs\",\n \"grandchallenge.policies\",\n \"grandchallenge.products\",\n \"grandchallenge.serving\",\n \"grandchallenge.blogs\",\n \"grandchallenge.publications\",\n \"grandchallenge.verifications\",\n \"grandchallenge.credits\",\n \"grandchallenge.task_categories\",\n \"grandchallenge.modalities\",\n \"grandchallenge.datatables\",\n \"grandchallenge.organizations\",\n \"grandchallenge.groups\",\n \"grandchallenge.github\",\n \"grandchallenge.codebuild\",\n \"grandchallenge.timezones\",\n \"grandchallenge.documentation\",\n \"grandchallenge.flatpages\",\n]\n\nINSTALLED_APPS = DJANGO_APPS + LOCAL_APPS + THIRD_PARTY_APPS\n\nADMIN_URL = f'{os.environ.get(\"DJANGO_ADMIN_URL\", \"django-admin\")}/'\n\nAUTHENTICATION_BACKENDS = [\n \"django.contrib.auth.backends.ModelBackend\",\n \"allauth.account.auth_backends.AuthenticationBackend\",\n \"guardian.backends.ObjectPermissionBackend\",\n]\n\nGOOGLE_ANALYTICS_ID = os.environ.get(\"GOOGLE_ANALYTICS_ID\", \"GA_TRACKING_ID\")\n\n##############################################################################\n#\n# django-allauth\n#\n##############################################################################\n\nACCOUNT_ADAPTER = \"grandchallenge.profiles.adapters.AccountAdapter\"\nACCOUNT_SIGNUP_FORM_CLASS = \"grandchallenge.profiles.forms.SignupForm\"\n\nACCOUNT_AUTHENTICATION_METHOD = \"username_email\"\nACCOUNT_EMAIL_REQUIRED = True\nACCOUNT_EMAIL_VERIFICATION = \"mandatory\"\nACCOUNT_USERNAME_MIN_LENGTH = 4\nACCOUNT_DEFAULT_HTTP_PROTOCOL = \"https\"\nACCOUNT_LOGIN_ON_EMAIL_CONFIRMATION = True\nACCOUNT_USERNAME_BLACKLIST = USERNAME_DENYLIST\n\nSOCIALACCOUNT_ADAPTER = \"grandchallenge.profiles.adapters.SocialAccountAdapter\"\nSOCIALACCOUNT_AUTO_SIGNUP = False\nSOCIALACCOUNT_STORE_TOKENS = False\nSOCIALACCOUNT_PROVIDERS = {\n \"gmail\": {\n \"APP\": {\n \"client_id\": os.environ.get(\"SOCIAL_AUTH_GOOGLE_OAUTH2_KEY\", \"\"),\n \"secret\": 
os.environ.get(\"SOCIAL_AUTH_GOOGLE_OAUTH2_SECRET\", \"\"),\n }\n }\n}\n\n# Use full paths as view name lookups do not work on subdomains\nLOGIN_URL = \"/accounts/login/\"\nLOGOUT_URL = \"/accounts/logout/\"\nLOGIN_REDIRECT_URL = \"/users/profile/\"\n\n##############################################################################\n#\n# stdimage\n#\n##############################################################################\n\n# Re-render the existing images if these values change\n# https://github.com/codingjoe/django-stdimage#re-rendering-variations\nSTDIMAGE_LOGO_VARIATIONS = {\n # Must be square\n \"full\": (None, None, False),\n \"x20\": (640, 640, True),\n \"x15\": (480, 480, True),\n \"x10\": (320, 320, True),\n \"x02\": (64, 64, True),\n}\nSTDIMAGE_SOCIAL_VARIATIONS = {\n # Values from social sharing\n \"full\": (None, None, False),\n \"x20\": (1280, 640, False),\n \"x15\": (960, 480, False),\n \"x10\": (640, 320, False),\n}\nSTDIMAGE_BANNER_VARIATIONS = {\n # Fixed width, any height\n \"full\": (None, None, False),\n \"x20\": (2220, None, False),\n \"x15\": (1665, None, False),\n \"x10\": (1110, None, False),\n}\n\n##############################################################################\n#\n# actstream\n#\n##############################################################################\n\nACTSTREAM_ENABLE = strtobool(os.environ.get(\"ACTSTREAM_ENABLE\", \"True\"))\nACTSTREAM_SETTINGS = {\n \"MANAGER\": \"actstream.managers.ActionManager\",\n \"FETCH_RELATIONS\": True,\n \"USE_JSONFIELD\": True,\n}\n\n##############################################################################\n#\n# django-summernote\n#\n##############################################################################\n\n# WYSIWYG editing with Summernote\nSUMMERNOTE_THEME = \"bs4\"\nSUMMERNOTE_CONFIG = {\n \"attachment_model\": \"uploads.SummernoteAttachment\",\n \"attachment_require_authentication\": True,\n \"summernote\": {\n \"width\": \"100%\",\n \"toolbar\": [\n [\"style\", [\"style\"]],\n [\n \"font\",\n [\"bold\", \"italic\", \"underline\", \"strikethrough\", \"clear\"],\n ],\n [\"para\", [\"ul\", \"ol\", \"paragraph\"]],\n [\"insert\", [\"link\", \"picture\", \"hr\"]],\n [\"view\", [\"fullscreen\", \"codeview\"]],\n [\"help\", [\"help\"]],\n ],\n },\n}\n\n# Settings for allowed HTML\nBLEACH_ALLOWED_TAGS = [\n \"a\",\n \"abbr\",\n \"acronym\",\n \"b\",\n \"blockquote\",\n \"br\",\n \"code\",\n \"col\",\n \"div\",\n \"em\",\n \"h1\",\n \"h2\",\n \"h3\",\n \"h4\",\n \"h5\",\n \"h6\",\n \"hr\",\n \"i\",\n \"img\",\n \"li\",\n \"ol\",\n \"p\",\n \"pre\",\n \"span\",\n \"strike\",\n \"strong\",\n \"table\",\n \"tbody\",\n \"thead\",\n \"td\",\n \"th\",\n \"tr\",\n \"u\",\n \"ul\",\n \"video\",\n]\nBLEACH_ALLOWED_ATTRIBUTES = {\n \"*\": [\"class\", \"data-toggle\", \"id\", \"style\", \"role\"],\n \"a\": [\"href\", \"title\", \"target\", \"rel\"],\n \"abbr\": [\"title\"],\n \"acronym\": [\"title\"],\n \"img\": [\"height\", \"src\", \"width\"],\n # For bootstrap tables: https://getbootstrap.com/docs/4.3/content/tables/\n \"th\": [\"scope\", \"colspan\"],\n \"td\": [\"colspan\"],\n \"video\": [\"src\", \"loop\", \"controls\", \"poster\"],\n}\nBLEACH_ALLOWED_STYLES = [\"height\", \"margin-left\", \"text-align\", \"width\"]\nBLEACH_ALLOWED_PROTOCOLS = [\"http\", \"https\", \"mailto\"]\nBLEACH_STRIP = strtobool(os.environ.get(\"BLEACH_STRIP\", \"True\"))\n\n# The markdown processor\nMARKDOWNX_MEDIA_PATH = datetime.now().strftime(\"i/%Y/%m/%d/\")\nMARKDOWNX_MARKDOWN_EXTENSIONS = [\n 
\"markdown.extensions.fenced_code\",\n \"markdown.extensions.tables\",\n \"markdown.extensions.sane_lists\",\n \"markdown.extensions.codehilite\",\n BS4Extension(),\n]\nMARKDOWNX_MARKDOWNIFY_FUNCTION = (\n \"grandchallenge.core.templatetags.bleach.md2html\"\n)\nMARKDOWNX_MARKDOWN_EXTENSION_CONFIGS = {}\nMARKDOWNX_IMAGE_MAX_SIZE = {\"size\": (2000, 0), \"quality\": 90}\n\nHAYSTACK_CONNECTIONS = {\n \"default\": {\"ENGINE\": \"haystack.backends.simple_backend.SimpleEngine\"},\n}\n\nFORUMS_CHALLENGE_CATEGORY_NAME = \"Challenges\"\nMACHINA_BASE_TEMPLATE_NAME = \"base.html\"\nMACHINA_PROFILE_AVATARS_ENABLED = False\nMACHINA_FORUM_NAME = \"Grand Challenge Forums\"\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n \"NAME\": \"django.contrib.auth.password_validation.UserAttributeSimilarityValidator\"\n },\n {\"NAME\": \"django.contrib.auth.password_validation.MinimumLengthValidator\"},\n {\n \"NAME\": \"django.contrib.auth.password_validation.CommonPasswordValidator\"\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.NumericPasswordValidator\"\n },\n]\n\n# A sample logging configuration. More info in configuration can be found at\n# https://docs.djangoproject.com/en/dev/topics/logging/ .\nLOGGING = {\n \"version\": 1,\n \"disable_existing_loggers\": False,\n \"handlers\": {\"console\": {\"class\": \"logging.StreamHandler\"}},\n \"loggers\": {\n \"grandchallenge\": {\n \"level\": os.environ.get(\"GRAND_CHALLENGE_LOG_LEVEL\", \"INFO\"),\n \"handlers\": [\"console\"],\n \"propagate\": True,\n },\n \"django\": {\n \"level\": os.environ.get(\"DJANGO_LOG_LEVEL\", \"INFO\"),\n \"handlers\": [\"console\"],\n \"propagate\": True,\n },\n \"werkzeug\": {\n \"handlers\": [\"console\"],\n \"level\": \"DEBUG\",\n \"propagate\": True,\n },\n # As AWS_XRAY_CONTEXT_MISSING can only be set to LOG_ERROR,\n # silence errors from this sdk as they flood the logs in\n # RedirectFallbackMiddleware\n \"aws_xray_sdk\": {\n \"handlers\": [\"console\"],\n \"level\": \"CRITICAL\",\n \"propagate\": True,\n },\n },\n}\n\n###############################################################################\n# SENTRY\n###############################################################################\n\nSENTRY_DSN = os.environ.get(\"DJANGO_SENTRY_DSN\", \"\")\nSENTRY_ENABLE_JS_REPORTING = strtobool(\n os.environ.get(\"SENTRY_ENABLE_JS_REPORTING\", \"False\")\n)\nWORKSTATION_SENTRY_DSN = os.environ.get(\"WORKSTATION_SENTRY_DSN\", \"\")\n\nif SENTRY_DSN:\n sentry_sdk.init(\n dsn=SENTRY_DSN,\n integrations=[DjangoIntegration(), CeleryIntegration()],\n release=COMMIT_ID,\n traces_sample_rate=float(\n os.environ.get(\"SENTRY_TRACES_SAMPLE_RATE\", \"0.0\")\n ),\n ignore_errors=[PriorStepFailed, ImageImportError],\n )\n ignore_logger(\"django.security.DisallowedHost\")\n ignore_logger(\"aws_xray_sdk\")\n\n###############################################################################\n# XRAY\n###############################################################################\nXRAY_RECORDER = {\n \"AWS_XRAY_CONTEXT_MISSING\": \"LOG_ERROR\",\n \"PLUGINS\": (\"ECSPlugin\",),\n \"AWS_XRAY_TRACING_NAME\": SESSION_COOKIE_DOMAIN.lstrip(\".\"),\n}\n\n###############################################################################\n#\n# django-rest-framework and drf-spectacular\n#\n###############################################################################\n\nREST_FRAMEWORK = {\n \"DEFAULT_PERMISSION_CLASSES\": (\"rest_framework.permissions.IsAdminUser\",),\n \"DEFAULT_AUTHENTICATION_CLASSES\": (\n \"knox.auth.TokenAuthentication\",\n 
\"rest_framework.authentication.SessionAuthentication\",\n ),\n \"DEFAULT_RENDERER_CLASSES\": [\"rest_framework.renderers.JSONRenderer\"],\n \"DEFAULT_PAGINATION_CLASS\": \"grandchallenge.api.pagination.MaxLimit1000OffsetPagination\",\n \"PAGE_SIZE\": 100,\n \"UNAUTHENTICATED_USER\": \"guardian.utils.get_anonymous_user\",\n \"DEFAULT_SCHEMA_CLASS\": \"drf_spectacular.openapi.AutoSchema\",\n}\n\nSPECTACULAR_SETTINGS = {\n \"SCHEMA_PATH_PREFIX\": r\"/api/v[0-9]\",\n \"TITLE\": f\"{SESSION_COOKIE_DOMAIN.lstrip('.')} API\",\n \"DESCRIPTION\": f\"The API for {SESSION_COOKIE_DOMAIN.lstrip('.')}.\",\n \"TOS\": f\"https://{SESSION_COOKIE_DOMAIN.lstrip('.')}/policies/terms-of-service/\",\n \"LICENSE\": {\"name\": \"Apache License 2.0\"},\n \"VERSION\": \"1.0.0\",\n}\n\nREST_KNOX = {\n \"AUTH_HEADER_PREFIX\": \"Bearer\",\n}\n\n###############################################################################\n#\n# CORS\n#\n###############################################################################\n\nVALID_SUBDOMAIN_REGEX = r\"[A-Za-z0-9](?:[A-Za-z0-9\\-]{0,61}[A-Za-z0-9])?\"\nCORS_ORIGIN_REGEX_WHITELIST = [\n rf\"^https:\\/\\/{VALID_SUBDOMAIN_REGEX}{re.escape(SESSION_COOKIE_DOMAIN)}$\",\n rf\"^https:\\/\\/{VALID_SUBDOMAIN_REGEX}.static.observableusercontent.com$\",\n]\n# SESSION_COOKIE_SAMESITE should be set to \"lax\" so won't send credentials\n# across domains, but this will allow workstations to access the api\nCORS_ALLOW_CREDENTIALS = True\n\n###############################################################################\n#\n# celery\n#\n###############################################################################\n\nCELERY_TASK_DECORATOR_KWARGS = {\n \"acks-late-2xlarge\": {\n # For idempotent tasks that take a long time (<7200s)\n # or require a large amount of memory\n \"acks_late\": True,\n \"reject_on_worker_lost\": True,\n \"queue\": \"acks-late-2xlarge\",\n },\n \"acks-late-micro-short\": {\n # For idempotent tasks that take a short time (<300s)\n # and do not require a large amount of memory\n \"acks_late\": True,\n \"reject_on_worker_lost\": True,\n \"queue\": \"acks-late-micro-short\",\n },\n}\n\nCELERY_RESULT_BACKEND = os.environ.get(\"CELERY_RESULT_BACKEND\", \"django-db\")\nCELERY_RESULT_PERSISTENT = True\nCELERY_TASK_ACKS_LATE = strtobool(\n os.environ.get(\"CELERY_TASK_ACKS_LATE\", \"False\")\n)\nCELERY_WORKER_PREFETCH_MULTIPLIER = int(\n os.environ.get(\"CELERY_WORKER_PREFETCH_MULTIPLIER\", \"1\")\n)\nCELERY_TASK_SOFT_TIME_LIMIT = int(\n os.environ.get(\"CELERY_TASK_SOFT_TIME_LIMIT\", \"7200\")\n)\nCELERY_TASK_TIME_LIMIT = int(os.environ.get(\"CELERY_TASK_TIME_LIMIT\", \"7260\"))\nCELERY_BROKER_TRANSPORT_OPTIONS = {\n \"visibility_timeout\": int(1.1 * CELERY_TASK_TIME_LIMIT)\n}\nCELERY_BROKER_CONNECTION_MAX_RETRIES = 0\n\nif os.environ.get(\"BROKER_TYPE\", \"\").lower() == \"sqs\":\n CELERY_BROKER_URL = \"sqs://\"\n\n CELERY_WORKER_ENABLE_REMOTE_CONTROL = False\n CELERY_BROKER_USE_SSL = True\n\n CELERY_BROKER_TRANSPORT_OPTIONS.update(\n {\n \"queue_name_prefix\": os.environ.get(\n \"CELERY_BROKER_QUEUE_NAME_PREFIX\", \"gclocalhost-\"\n ),\n \"region\": os.environ.get(\n \"CELERY_BROKER_REGION\", AWS_DEFAULT_REGION\n ),\n \"polling_interval\": int(\n os.environ.get(\"CELERY_BROKER_POLLING_INTERVAL\", \"1\")\n ),\n }\n )\nelse:\n CELERY_BROKER_URL = os.environ.get(\n \"BROKER_URL\", f\"redis://{REDIS_HOSTNAME}:6379/0\"\n )\n\n# Keep results of sent emails\nCELERY_EMAIL_CHUNK_SIZE = 1\nCELERY_EMAIL_TASK_CONFIG = {\n \"ignore_result\": False,\n}\n\nCOMPONENTS_DEFAULT_BACKEND = 
os.environ.get(\n \"COMPONENTS_DEFAULT_BACKEND\",\n \"grandchallenge.components.backends.amazon_ecs.AmazonECSExecutor\",\n)\nCOMPONENTS_REGISTRY_URL = os.environ.get(\n \"COMPONENTS_REGISTRY_URL\", \"registry:5000\"\n)\nCOMPONENTS_REGISTRY_PREFIX = os.environ.get(\n \"COMPONENTS_REGISTRY_PREFIX\", SESSION_COOKIE_DOMAIN.lstrip(\".\")\n)\nCOMPONENTS_REGISTRY_INSECURE = strtobool(\n os.environ.get(\"COMPONENTS_REGISTRY_INSECURE\", \"False\")\n)\nCOMPONENTS_MAXIMUM_IMAGE_SIZE = 10_737_418_240 # 10 gb\nCOMPONENTS_AMAZON_ECS_NFS_MOUNT_POINT = os.environ.get(\n \"COMPONENTS_AMAZON_ECS_NFS_MOUNT_POINT\", \"/mnt/aws-batch-nfs/\"\n)\nCOMPONENTS_AMAZON_ECS_LOG_GROUP_NAME = os.environ.get(\n \"COMPONENTS_AMAZON_ECS_LOG_GROUP_NAME\", \"\"\n)\nCOMPONENTS_AMAZON_ECS_LOGS_REGION = os.environ.get(\n \"COMPONENTS_AMAZON_ECS_LOGS_REGION\", AWS_DEFAULT_REGION\n)\nCOMPONENTS_AMAZON_ECS_CPU_CLUSTER_ARN = os.environ.get(\n \"COMPONENTS_AMAZON_ECS_CPU_CLUSTER_ARN\", \"\"\n)\nCOMPONENTS_AMAZON_ECS_GPU_CLUSTER_ARN = os.environ.get(\n \"COMPONENTS_AMAZON_ECS_GPU_CLUSTER_ARN\", \"\"\n)\nCOMPONENTS_AMAZON_ECS_TASK_ROLE_ARN = os.environ.get(\n \"COMPONENTS_AMAZON_ECS_TASK_ROLE_ARN\", \"\"\n)\nCOMPONENTS_DOCKER_BASE_URL = os.environ.get(\n \"COMPONENTS_DOCKER_BASE_URL\", \"unix://var/run/docker.sock\"\n)\nCOMPONENTS_DOCKER_TLSVERIFY = strtobool(\n os.environ.get(\"COMPONENTS_DOCKER_TLSVERIFY\", \"False\")\n)\nCOMPONENTS_DOCKER_TLSCACERT = os.environ.get(\"COMPONENTS_DOCKER_TLSCACERT\", \"\")\nCOMPONENTS_DOCKER_TLSCERT = os.environ.get(\"COMPONENTS_DOCKER_TLSCERT\", \"\")\nCOMPONENTS_DOCKER_TLSKEY = os.environ.get(\"COMPONENTS_DOCKER_TLSKEY\", \"\")\nCOMPONENTS_MEMORY_LIMIT = int(os.environ.get(\"COMPONENTS_MEMORY_LIMIT\", \"4\"))\nCOMPONENTS_IO_IMAGE = \"alpine:3.14\"\nCOMPONENTS_CPU_QUOTA = int(os.environ.get(\"COMPONENTS_CPU_QUOTA\", \"100000\"))\nCOMPONENTS_CPU_PERIOD = int(os.environ.get(\"COMPONENTS_CPU_PERIOD\", \"100000\"))\nCOMPONENTS_PIDS_LIMIT = int(os.environ.get(\"COMPONENTS_PIDS_LIMIT\", \"128\"))\nCOMPONENTS_CPU_SHARES = int(\n os.environ.get(\"COMPONENTS_CPU_SHARES\", \"1024\") # Default weight\n)\nCOMPONENTS_CPUSET_CPUS = str(os.environ.get(\"COMPONENTS_CPUSET_CPUS\", \"\"))\nCOMPONENTS_DOCKER_RUNTIME = os.environ.get(\"COMPONENTS_DOCKER_RUNTIME\", None)\nCOMPONENTS_NVIDIA_VISIBLE_DEVICES = os.environ.get(\n \"COMPONENTS_NVIDIA_VISIBLE_DEVICES\", \"void\"\n)\n\n# Set which template pack to use for forms\nCRISPY_TEMPLATE_PACK = \"bootstrap4\"\n\n# When using bootstrap, error messages need to be renamed to danger\nMESSAGE_TAGS = {messages.ERROR: \"danger\"}\n\n# The name of the group whose members will be able to create reader studies\nREADER_STUDY_CREATORS_GROUP_NAME = \"reader_study_creators\"\n\n###############################################################################\n#\n# workspaces\n#\n###############################################################################\n\nWORKBENCH_SECRET_KEY = os.environ.get(\"WORKBENCH_SECRET_KEY\")\nWORKBENCH_API_URL = os.environ.get(\"WORKBENCH_API_URL\")\nWORKBENCH_ADMIN_USERNAME = os.environ.get(\"WORKBENCH_ADMIN_USERNAME\", \"demo\")\n\n###############################################################################\n#\n# workstations\n#\n###############################################################################\n\n# The workstation that is accessible by all authorised users\nDEFAULT_WORKSTATION_SLUG = os.environ.get(\n \"DEFAULT_WORKSTATION_SLUG\", \"cirrus-core\"\n)\nWORKSTATIONS_BASE_IMAGE_QUERY_PARAM = \"image\"\nWORKSTATIONS_OVERLAY_QUERY_PARAM = 
\"overlay\"\nWORKSTATIONS_READY_STUDY_QUERY_PARAM = \"readerStudy\"\nWORKSTATIONS_ALGORITHM_JOB_QUERY_PARAM = \"algorithmJob\"\nWORKSTATIONS_CONFIG_QUERY_PARAM = \"config\"\n# The name of the network that the workstations will be attached to\nWORKSTATIONS_NETWORK_NAME = os.environ.get(\n \"WORKSTATIONS_NETWORK_NAME\", \"grand-challengeorg_workstations\"\n)\n# The total limit on the number of sessions\nWORKSTATIONS_MAXIMUM_SESSIONS = int(\n os.environ.get(\"WORKSTATIONS_MAXIMUM_SESSIONS\", \"10\")\n)\n# The name of the group whose members will be able to create workstations\nWORKSTATIONS_CREATORS_GROUP_NAME = \"workstation_creators\"\nWORKSTATIONS_SESSION_DURATION_LIMIT = int(\n os.environ.get(\"WORKSTATIONS_SESSION_DURATION_LIMIT\", \"10000\")\n)\n# Which regions are available for workstations to run in\nWORKSTATIONS_ACTIVE_REGIONS = os.environ.get(\n \"WORKSTATIONS_ACTIVE_REGIONS\", AWS_DEFAULT_REGION\n).split(\",\")\nWORKSTATIONS_RENDERING_SUBDOMAINS = {\n # Possible AWS regions\n *[\n \"-\".join(z)\n for z in product(\n [\"us\", \"af\", \"ap\", \"ca\", \"cn\", \"eu\", \"me\", \"sa\"],\n [\n \"east\",\n \"west\",\n \"south\",\n \"north\",\n \"central\",\n \"northeast\",\n \"southeast\",\n \"northwest\",\n \"southwest\",\n ],\n [\"1\", \"2\", \"3\"],\n )\n ],\n # User defined regions\n \"eu-nl-1\",\n \"eu-nl-2\",\n}\n# Number of minutes grace period before the container is stopped\nWORKSTATIONS_GRACE_MINUTES = 5\n\nCELERY_BEAT_SCHEDULE = {\n \"push_metrics_to_cloudwatch\": {\n \"task\": \"grandchallenge.core.tasks.put_cloudwatch_metrics\",\n \"schedule\": timedelta(seconds=15),\n },\n \"ping_google\": {\n \"task\": \"grandchallenge.core.tasks.ping_google\",\n \"schedule\": timedelta(days=1),\n },\n \"update_publication_metadata\": {\n \"task\": \"grandchallenge.publications.tasks.update_publication_metadata\",\n \"schedule\": timedelta(days=1),\n },\n \"send_unread_notification_emails\": {\n \"task\": \"grandchallenge.notifications.tasks.send_unread_notification_emails\",\n \"schedule\": timedelta(days=1),\n },\n \"cleanup_stale_uploads\": {\n \"task\": \"grandchallenge.jqfileupload.tasks.cleanup_stale_uploads\",\n \"schedule\": timedelta(hours=1),\n },\n \"delete_old_user_uploads\": {\n \"task\": \"grandchallenge.uploads.tasks.delete_old_user_uploads\",\n \"schedule\": timedelta(hours=1),\n },\n \"clear_sessions\": {\n \"task\": \"grandchallenge.core.tasks.clear_sessions\",\n \"schedule\": timedelta(days=1),\n },\n \"update_challenge_results_cache\": {\n \"task\": \"grandchallenge.challenges.tasks.update_challenge_results_cache\",\n \"schedule\": timedelta(minutes=5),\n },\n \"validate_external_challenges\": {\n \"task\": \"grandchallenge.challenges.tasks.check_external_challenge_urls\",\n \"schedule\": timedelta(days=1),\n },\n **{\n f\"stop_expired_services_{region}\": {\n \"task\": \"grandchallenge.components.tasks.stop_expired_services\",\n \"kwargs\": {\n \"app_label\": \"workstations\",\n \"model_name\": \"session\",\n \"region\": region,\n },\n \"options\": {\"queue\": f\"workstations-{region}\"},\n \"schedule\": timedelta(minutes=WORKSTATIONS_GRACE_MINUTES),\n }\n for region in WORKSTATIONS_ACTIVE_REGIONS\n },\n}\n\n# The name of the group whose members will be able to create algorithms\nALGORITHMS_CREATORS_GROUP_NAME = \"algorithm_creators\"\n\n# Disallow some challenge names due to subdomain or media folder clashes\nDISALLOWED_CHALLENGE_NAMES = {\n \"m\",\n IMAGE_FILES_SUBDIRECTORY,\n \"logos\",\n \"banners\",\n \"mugshots\",\n \"docker\",\n EVALUATION_FILES_SUBDIRECTORY,\n 
\"evaluation-supplementary\",\n \"favicon\",\n \"i\",\n \"cache\",\n \"challenge\",\n \"challenges\",\n JQFILEUPLOAD_UPLOAD_SUBIDRECTORY,\n *USERNAME_DENYLIST,\n *WORKSTATIONS_RENDERING_SUBDOMAINS,\n}\n\n# Disallow registration from certain domains\nDISALLOWED_EMAIL_DOMAINS = {\n \"qq.com\",\n \"gm.uit.edu.vn\",\n \"wust.edu.cn\",\n *blocklist,\n}\n\n# GitHub App\nGITHUB_APP_INSTALL_URL = os.environ.get(\"GITHUB_APP_INSTALL_URL\", \"\")\nGITHUB_APP_ID = os.environ.get(\"GITHUB_APP_ID\", \"\")\nGITHUB_CLIENT_ID = os.environ.get(\"GITHUB_CLIENT_ID\", \"\")\nGITHUB_CLIENT_SECRET = os.environ.get(\"GITHUB_CLIENT_SECRET\", \"\")\nGITHUB_PRIVATE_KEY_BASE64 = os.environ.get(\"GITHUB_PRIVATE_KEY_BASE64\", \"\")\nGITHUB_WEBHOOK_SECRET = os.environ.get(\"GITHUB_WEBHOOK_SECRET\", \"\")\n\nCODEBUILD_PROJECT_NAME = os.environ.get(\"CODEBUILD_PROJECT_NAME\", \"\")\n\nOPEN_SOURCE_LICENSES = [\n \"Apache License 2.0\",\n \"MIT License\",\n \"GNU GPLv3\",\n \"GNU AGPLv3\",\n \"GNU GPLv3\",\n \"GNU LGPLv3\",\n \"Mozilla Public License 2.0\",\n \"Boost Software License 1.0\",\n \"The Unlicense\",\n]\n\n# Maximum file size in bytes to be opened by SimpleITK.ReadImage in cases.models.Image.get_sitk_image()\nMAX_SITK_FILE_SIZE = 268_435_456 # 256 mb\n\n# The maximum size of all the files in an upload session in bytes\nUPLOAD_SESSION_MAX_BYTES = 10_737_418_240 # 10 gb\n\n# Some forms have a lot of data, such as a reader study update view\n# that can contain reports about the medical images\nDATA_UPLOAD_MAX_MEMORY_SIZE = 16_777_216 # 16 mb\n\n# Some forms have a lot of fields, such as uploads of images\n# with many slices\nDATA_UPLOAD_MAX_NUMBER_FIELDS = int(\n os.environ.get(\"DATA_UPLOAD_MAX_NUMBER_FIELDS\", \"2048\")\n)\n\n# Default maximum width or height for thumbnails in retina workstation\nRETINA_DEFAULT_THUMBNAIL_SIZE = 128\n\n# Retina specific settings\nRETINA_GRADERS_GROUP_NAME = \"retina_graders\"\nRETINA_ADMINS_GROUP_NAME = \"retina_admins\"\n\nENABLE_DEBUG_TOOLBAR = False\n\nif DEBUG:\n EMAIL_BACKEND = \"django.core.mail.backends.console.EmailBackend\"\n\n # Allow localhost in development\n CORS_ORIGIN_REGEX_WHITELIST += [r\"^http://localhost:8888$\"]\n\n LOGGING[\"loggers\"][\"grandchallenge\"][\"level\"] = \"DEBUG\"\n\n PUBLIC_S3_STORAGE_KWARGS.update({\"secure_urls\": False})\n DEMO_ALGORITHM_IMAGE_PATH = os.path.join(SITE_ROOT, \"algorithm.tar.gz\")\n DEMO_ALGORITHM_SHA256 = \"sha256:5e81cef3738b7dbffc12c101990eb3b97f17642c09a2e0b64d5b3d4dd144e79b\"\n\n del CELERY_BEAT_SCHEDULE[\"push_metrics_to_cloudwatch\"]\n\n if ENABLE_DEBUG_TOOLBAR:\n INSTALLED_APPS += (\"debug_toolbar\",)\n\n MIDDLEWARE = (\n \"debug_toolbar.middleware.DebugToolbarMiddleware\",\n *MIDDLEWARE,\n )\n\n DEBUG_TOOLBAR_CONFIG = {\n \"SHOW_TOOLBAR_CALLBACK\": \"config.toolbar_callback\",\n \"RESULTS_CACHE_SIZE\": 100,\n }\n",
"path": "app/config/settings.py"
}
] | diff --git a/app/config/settings.py b/app/config/settings.py
index 7840edf0c6..e2aef9a903 100644
--- a/app/config/settings.py
+++ b/app/config/settings.py
@@ -286,7 +286,7 @@ def strtobool(val) -> bool:
"display-capture": [],
"document-domain": [],
"encrypted-media": [],
- "fullscreen": [],
+ "fullscreen": ["self"],
"geolocation": [],
"gyroscope": [],
"interest-cohort": [],
|
modin-project__modin-373 | TypeError: bad operand type for abs(): 'SeriesView'
### Describe the problem
SeriesView does not implement the `__abs__` operator yet, but Series does. Because `abs()` is a builtin, Python looks `__abs__` up on the object's class rather than through instance attribute access, so the call never reaches SeriesView's `__getattribute__` forwarding and the method must be implemented on the class directly.
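
A minimal, self-contained illustration of the lookup rule (the `Proxy` class here is hypothetical, just to demonstrate Python's implicit special-method lookup):

```python
class Proxy(object):
    def __getattribute__(self, item):
        # Forward all instance attribute access, the way SeriesView does.
        return lambda: 42

p = Proxy()
p.__abs__()  # returns 42: explicit attribute access goes through __getattribute__
abs(p)       # TypeError: bad operand type for abs(): 'Proxy'
             # the builtin looks __abs__ up on type(p) and bypasses the forwarding
```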
### Source code / logs
`abs(df['col1'])`
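One possible fix (a sketch, not necessarily the final patch) is to define `__abs__` on SeriesView itself and delegate to the wrapped pandas Series; the wrapper below is simplified, since the real class also tracks `parent_df` and `_loc`:

```python
import pandas

class SeriesView(object):
    def __init__(self, series):
        self.series = series

    def __abs__(self):
        # Must live on the class so the builtin abs() can find it;
        # the wrapped pandas Series already implements __abs__.
        return self.series.__abs__()

view = SeriesView(pandas.Series([-1, 2, -3]))
print(abs(view))  # 1, 2, 3 (a pandas Series)
```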
| [
{
"content": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport pandas\nimport inspect\nimport numpy as np\n\n\n# from .utils import _inherit_docstrings\n\n\ndef na_op():\n \"\"\"Pandas uses a similar function to handle na values.\n \"\"\"\n raise NotImplementedError(\"Not Yet implemented.\")\n\n\nclass SeriesView(object):\n \"\"\"A wrapper class for pandas Series.\n\n Note: The main use of this class is to help us implement inplace operations that\n propagate their changes back to the DataFrame that a Series belongs to. We are\n only need to use this object when `__getitem__` returns a pandas Series, or when\n `loc`/`iloc` return a Series as well.\n\n Important: This is not needed to replace every Series in Modin. For example, when an\n operation on a Series returns a new Series, it does not need to return an object\n of this class. It can return a Series because the new object does not have a\n DataFrame that it is associated with.\n\n \"\"\"\n\n def __init__(self, series, parent_df, loc):\n assert type(series) is pandas.Series\n from .dataframe import DataFrame\n\n assert type(parent_df) is DataFrame\n assert type(loc) is tuple\n self.series = series\n self.parent_df = parent_df\n self._loc = loc\n\n def __repr__(self):\n return repr(self.series)\n\n def __str__(self):\n return str(self.series)\n\n def __comparisons__(self, func):\n def compare_func(other):\n if hasattr(other, \"series\"):\n other = other.series\n return getattr(self.series, func)(other)\n\n return compare_func\n\n def __eq__(self, other):\n return self.__comparisons__(\"__eq__\")(other)\n\n def __ge__(self, other):\n return self.__comparisons__(\"__ge__\")(other)\n\n def __gt__(self, other):\n return self.__comparisons__(\"__gt__\")(other)\n\n def __le__(self, other):\n return self.__comparisons__(\"__le__\")(other)\n\n def __lt__(self, other):\n return self.__comparisons__(\"__lt__\")(other)\n\n def __ne__(self, other):\n return self.__comparisons__(\"__ne__\")(other)\n\n def __arithmetic_op__(self, func):\n def arithemtic_op(other):\n if hasattr(other, \"series\"):\n other = other.series\n return getattr(self.series, func)(other)\n\n return arithemtic_op\n\n def __add__(self, other):\n return self.__arithmetic_op__(\"__add__\")(other)\n\n def __mul__(self, other):\n return self.__arithmetic_op__(\"__mul__\")(other)\n\n def __sub__(self, other):\n return self.__arithmetic_op__(\"__sub__\")(other)\n\n def __truediv__(self, other):\n return self.__arithmetic_op__(\"__truediv__\")(other)\n\n def __floordiv__(self, other):\n return self.__arithmetic_op__(\"__floordiv__\")(other)\n\n def __mod__(self, other):\n return self.__arithmetic_op__(\"__mod__\")(other)\n\n def __pow__(self, other):\n return self.__arithmetic_op__(\"__pow__\")(other)\n\n def __radd__(self, other):\n return self.__arithmetic_op__(\"__radd__\")(other)\n\n def __rmul__(self, other):\n return self.__arithmetic_op__(\"__rmul__\")(other)\n\n def __rsub__(self, other):\n return self.__arithmetic_op__(\"__rsub__\")(other)\n\n def __rtruediv__(self, other):\n return self.__arithmetic_op__(\"__rtruediv__\")(other)\n\n def __rfloordiv__(self, other):\n return self.__arithmetic_op__(\"__rfloordiv__\")(other)\n\n def __rmod__(self, other):\n return self.__arithmetic_op__(\"__rmod__\")(other)\n\n def __rpow__(self, other):\n return self.__arithmetic_op__(\"__rpow__\")(other)\n\n def __iadd__(self, other):\n return self.__arithmetic_op__(\"__iadd__\")(other)\n\n def __imul__(self, other):\n 
return self.__arithmetic_op__(\"__imul__\")(other)\n\n def __isub__(self, other):\n return self.__arithmetic_op__(\"__isub__\")(other)\n\n def __itruediv__(self, other):\n return self.__arithmetic_op__(\"__itruediv__\")(other)\n\n def __ifloordiv__(self, other):\n return self.__arithmetic_op__(\"__ifloordiv__\")(other)\n\n def __imod__(self, other):\n return self.__arithmetic_op__(\"__imod__\")(other)\n\n def __ipow__(self, other):\n return self.__arithmetic_op__(\"__ipow__\")(other)\n\n def __neg__(self, other):\n return self.__arithmetic_op__(\"__neg__\")(other)\n\n def __iter__(self):\n return self.series.__iter__()\n\n def __len__(self):\n return self.series.__len__()\n\n def __getitem__(self, item):\n return self.series.__getitem__(item)\n\n def __setitem__(self, key, value):\n return_val = self.series.__setitem__(key, value)\n self.parent_df.loc[self._loc] = self.series\n return return_val\n\n def __getattribute__(self, item):\n default_behaviors = [\n \"__init__\",\n \"series\",\n \"parent_df\",\n \"_loc\",\n \"__arithmetic_op__\",\n \"__comparisons__\",\n \"__class__\",\n ]\n if item not in default_behaviors:\n method = self.series.__getattribute__(item)\n # Certain operations like `at`, `loc`, `iloc`, etc. are callable because in\n # pandas they are equivalent to classes. They are verified here because they\n # cannot be overridden with the functions below. This generally solves the\n # problem where the instance property is callable, but the class property is\n # not.\n is_callable = callable(method) and callable(\n getattr(type(self.series), item)\n )\n try:\n has_inplace_param = is_callable and \"inplace\" in str(\n inspect.signature(method)\n )\n # This will occur on Python2\n except AttributeError:\n has_inplace_param = is_callable and \"inplace\" in str(\n inspect.getargspec(method)\n )\n\n if is_callable and has_inplace_param and self.parent_df is not None:\n\n def inplace_handler(*args, **kwargs):\n \"\"\"Replaces the default behavior of methods with inplace kwarg.\n\n Note: This method will modify the DataFrame this Series is attached\n to when `inplace` is True. Instead of rewriting or overriding\n every method that uses `inplace`, we use this handler.\n\n This handler will first check that the keyword argument passed\n for `inplace` is True, if not then it will just return the\n result of the operation requested.\n\n If `inplace` is True, do the operation, keeping track of the\n previous length. This is because operations like `dropna` still\n propagate back to the DataFrame that holds the Series.\n\n If the length did not change, we propagate the inplace changes\n of the operation back to the original DataFrame with\n `__setitem__`.\n\n If the length changed, we just need to do a `reindex` on the\n parent DataFrame. This will propagate the inplace operation\n (e.g. 
`dropna`) back to the parent DataFrame.\n\n See notes in SeriesView class about when it is okay to return a\n pandas Series vs a SeriesView.\n\n Returns:\n If `inplace` is True: None, else: A new Series.\n \"\"\"\n if kwargs.get(\"inplace\", False):\n prev_len = len(self.series)\n self.series.__getattribute__(item)(*args, **kwargs)\n if prev_len == len(self.series):\n self.parent_df.loc[self._loc] = self.series\n else:\n self.parent_df.reindex(index=self.series.index, copy=False)\n return None\n else:\n return self.series.__getattribute__(item)(*args, **kwargs)\n\n # We replace the method with `inplace_handler` for inplace operations\n method = inplace_handler\n elif is_callable:\n\n def other_handler(*args, **kwargs):\n \"\"\"Replaces the method's args and kwargs with the Series object.\n\n Note: This method is needed because sometimes operations like\n `df['col0'].equals(df['col1'])` do not return the correct value.\n This mostly has occurred in Python2, but overriding of the\n method will make the behavior more deterministic for all calls.\n\n Returns the result of `__getattribute__` from the Series this wraps.\n \"\"\"\n args = tuple(\n arg if not isinstance(arg, SeriesView) else arg.series\n for arg in args\n )\n kwargs = {\n kw: arg if not isinstance(arg, SeriesView) else arg.series\n for kw, arg in kwargs.items()\n }\n return self.series.__getattribute__(item)(*args, **kwargs)\n\n method = other_handler\n return method\n # We need to do this hack for equality checking.\n elif item == \"__class__\":\n return self.series.__class__\n else:\n return object.__getattribute__(self, item)\n\n\nclass Series(object):\n def __init__(self, series_oids):\n \"\"\"Constructor for a Series object.\n\n Args:\n series_oids ([ObjectID]): The list of remote Series objects.\n \"\"\"\n self.series_oids = series_oids\n\n @property\n def T(self):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def __abs__(self):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def __add__(self, right, name=\"__add__\", na_op=na_op):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def __and__(self, other):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def __array__(self, result=None):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def __array_prepare__(self, result, context=None):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n @property\n def __array_priority__(self):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def __array_wrap__(self, result, context=None):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def __bool__(self):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def __bytes__(self):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def __class__(\n self, data=None, index=None, dtype=None, name=None, copy=False, fastpath=False\n ):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def __contains__(self, key):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def __copy__(self, deep=True):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def __deepcopy__(self, memo=None):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def __delitem__(self, key):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def __dir__(self):\n return list(type(self).__dict__.keys())\n\n def __div__(self, right, name=\"__truediv__\", na_op=na_op):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def __divmod__(self, right, name=\"__divmod__\", na_op=na_op):\n raise NotImplementedError(\"Not 
Yet implemented.\")\n\n @property\n def __doc__(self):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def __eq__(self, other, axis=None):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def __finalize__(self, other, method=None, **kwargs):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def __float__(self):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def __floordiv__(self, right, name=\"__floordiv__\", na_op=na_op):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def __ge__(self, other, axis=None):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def __getitem__(self, key):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def __getstate__(self):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def __gt__(self, other, axis=None):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def __iadd__(self, other):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def __imul__(self, other):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def __int__(self):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def __invert__(self):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def __ipow__(self, other):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def __isub__(self, other):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def __iter__(self):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def __itruediv__(self, other):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def __le__(self, other, axis=None):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def __len__(self):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def __long__(self):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def __lt__(self, other, axis=None):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def __mod__(self, right, name=\"__mod__\", na_op=na_op):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def __mul__(self, right, name=\"__mul__\", na_op=na_op):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def __ne__(self, other, axis=None):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def __neg__(self):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def __nonzero__(self):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def __or__(self, other):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def __pow__(self, right, name=\"__pow__\", na_op=na_op):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def __repr__(self):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def __round__(self, decimals=0):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def __setitem__(self, key, value):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def __setstate__(self, state):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def __sizeof__(self):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def __str__(self):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def __sub__(self, right, name=\"__sub__\", na_op=na_op):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def __truediv__(self, right, name=\"__truediv__\", na_op=na_op):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def __xor__(self, other):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def abs(self):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def add(self, other, level=None, fill_value=None, axis=0):\n raise 
NotImplementedError(\"Not Yet implemented.\")\n\n def add_prefix(self, prefix):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def add_suffix(self, suffix):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def agg(self, func, axis=0, *args, **kwargs):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def aggregate(self, func, axis=0, *args, **kwargs):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def align(\n self,\n other,\n join=\"outer\",\n axis=None,\n level=None,\n copy=True,\n fill_value=None,\n method=None,\n limit=None,\n fill_axis=0,\n broadcast_axis=None,\n ):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def all(self, axis=None, bool_only=None, skipna=None, level=None, **kwargs):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def any(self, axis=None, bool_only=None, skipna=None, level=None, **kwargs):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def append(self, to_append, ignore_index=False, verify_integrity=False):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def apply(self, func, convert_dtype=True, args=(), **kwds):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def argmax(self, axis=None, skipna=True, *args, **kwargs):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def argmin(self, axis=None, skipna=True, *args, **kwargs):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def argsort(self, axis=0, kind=\"quicksort\", order=None):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def as_blocks(self, copy=True):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def as_matrix(self, columns=None):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def asfreq(self, freq, method=None, how=None, normalize=False, fill_value=None):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def asof(self, where, subset=None):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def astype(self, dtype, copy=True, errors=\"raise\", **kwargs):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def at(self, axis=None):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def at_time(self, time, asof=False):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def autocorr(self, lag=1):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def between(self, left, right, inclusive=True):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def between_time(self, start_time, end_time, include_start=True, include_end=True):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def bfill(self, axis=None, inplace=False, limit=None, downcast=None):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def bool(self):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def clip(self, lower=None, upper=None, axis=None, *args, **kwargs):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def clip_lower(self, threshold, axis=None):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def clip_upper(self, threshold, axis=None):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def combine(self, other, func, fill_value=np.nan):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def combine_first(self, other):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def compound(self, axis=None, skipna=None, level=None):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def compress(self, condition, *args, **kwargs):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def 
consolidate(self, inplace=False):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def convert_objects(\n self,\n convert_dates=True,\n convert_numeric=False,\n convert_timedeltas=True,\n copy=True,\n ):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def copy(self, deep=True):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def corr(self, other, method=\"pearson\", min_periods=None):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def count(self, level=None):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def cov(self, other, min_periods=None):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def cummax(self, axis=None, skipna=True, *args, **kwargs):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def cummin(self, axis=None, skipna=True, *args, **kwargs):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def cumprod(self, axis=None, skipna=True, *args, **kwargs):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def cumsum(self, axis=None, skipna=True, *args, **kwargs):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def describe(self, percentiles=None, include=None, exclude=None):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def diff(self, periods=1):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def div(self, other, level=None, fill_value=None, axis=0):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def divide(self, other, level=None, fill_value=None, axis=0):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def dot(self, other):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def drop(self, labels, axis=0, level=None, inplace=False, errors=\"raise\"):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def drop_duplicates(self, keep=\"first\", inplace=False):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def dropna(self, axis=0, inplace=False, **kwargs):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def duplicated(self, keep=\"first\"):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def eq(self, other, level=None, fill_value=None, axis=0):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def equals(self, other):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def ewm(\n self,\n com=None,\n span=None,\n halflife=None,\n alpha=None,\n min_periods=0,\n freq=None,\n adjust=True,\n ignore_na=False,\n axis=0,\n ):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def expanding(self, min_periods=1, freq=None, center=False, axis=0):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def factorize(self, sort=False, na_sentinel=-1):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def ffill(self, axis=None, inplace=False, limit=None, downcast=None):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def fillna(\n self,\n value=None,\n method=None,\n axis=None,\n inplace=False,\n limit=None,\n downcast=None,\n **kwargs\n ):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def filter(self, items=None, like=None, regex=None, axis=None):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def first(self, offset):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def first_valid_index(self):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def floordiv(self, other, level=None, fill_value=None, axis=0):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def from_array(\n self, arr, index=None, name=None, dtype=None, 
copy=False, fastpath=False\n ):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def from_csv(\n self,\n path,\n sep=\",\",\n parse_dates=True,\n header=None,\n index_col=0,\n encoding=None,\n infer_datetime_format=False,\n ):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def ge(self, other, level=None, fill_value=None, axis=0):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def get(self, key, default=None):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def get_dtype_counts(self):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def get_ftype_counts(self):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def get_value(self, label, takeable=False):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def get_values(self):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def groupby(\n self,\n by=None,\n axis=0,\n level=None,\n as_index=True,\n sort=True,\n group_keys=True,\n squeeze=False,\n **kwargs\n ):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def gt(self, other, level=None, fill_value=None, axis=0):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def head(self, n=5):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def hist(\n self,\n by=None,\n ax=None,\n grid=True,\n xlabelsize=None,\n xrot=None,\n ylabelsize=None,\n yrot=None,\n figsize=None,\n bins=10,\n **kwds\n ):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def iat(self, axis=None):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def idxmax(self, axis=None, skipna=True, *args, **kwargs):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def idxmin(self, axis=None, skipna=True, *args, **kwargs):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def iloc(self, axis=None):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def interpolate(\n self,\n method=\"linear\",\n axis=0,\n limit=None,\n inplace=False,\n limit_direction=\"forward\",\n downcast=None,\n **kwargs\n ):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def isin(self, values):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def isnull(self):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def item(self):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def items(self):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def iteritems(self):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def ix(self, axis=None):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def keys(self):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def kurt(self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def kurtosis(self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def last(self, offset):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def last_valid_index(self):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def le(self, other, level=None, fill_value=None, axis=0):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def loc(self, axis=None):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def lt(self, other, level=None, fill_value=None, axis=0):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def mad(self, axis=None, skipna=None, level=None):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def map(self, arg, na_action=None):\n raise 
NotImplementedError(\"Not Yet implemented.\")\n\n def mask(\n self,\n cond,\n other=np.nan,\n inplace=False,\n axis=None,\n level=None,\n try_cast=False,\n raise_on_error=True,\n ):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def max(self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def mean(self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def median(self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def memory_usage(self, index=True, deep=False):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def min(self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def mod(self, other, level=None, fill_value=None, axis=0):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def mode(self):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def mul(self, other, level=None, fill_value=None, axis=0):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def multiply(self, other, level=None, fill_value=None, axis=0):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def ne(self, other, level=None, fill_value=None, axis=0):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def nlargest(self, n=5, keep=\"first\"):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def nonzero(self):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def notnull(self):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def nsmallest(self, n=5, keep=\"first\"):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def nunique(self, dropna=True):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def pct_change(self, periods=1, fill_method=\"pad\", limit=None, freq=None, **kwargs):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def pipe(self, func, *args, **kwargs):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def plot(\n self,\n kind=\"line\",\n ax=None,\n figsize=None,\n use_index=True,\n title=None,\n grid=None,\n legend=False,\n style=None,\n logx=False,\n logy=False,\n loglog=False,\n xticks=None,\n yticks=None,\n xlim=None,\n ylim=None,\n rot=None,\n fontsize=None,\n colormap=None,\n table=False,\n yerr=None,\n xerr=None,\n label=None,\n secondary_y=False,\n **kwds\n ):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def pop(self, item):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def pow(self, other, level=None, fill_value=None, axis=0):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def prod(self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def product(self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def ptp(self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def put(self, *args, **kwargs):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def quantile(self, q=0.5, interpolation=\"linear\"):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def radd(self, other, level=None, fill_value=None, axis=0):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def rank(\n self,\n axis=0,\n method=\"average\",\n 
numeric_only=None,\n na_option=\"keep\",\n ascending=True,\n pct=False,\n ):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def ravel(self, order=\"C\"):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def rdiv(self, other, level=None, fill_value=None, axis=0):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def reindex(self, index=None, **kwargs):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def reindex_axis(self, labels, axis=0, **kwargs):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def reindex_like(self, other, method=None, copy=True, limit=None, tolerance=None):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def rename(self, index=None, **kwargs):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def rename_axis(self, mapper, axis=0, copy=True, inplace=False):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def reorder_levels(self, order):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def repeat(self, repeats, *args, **kwargs):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def replace(\n self,\n to_replace=None,\n value=None,\n inplace=False,\n limit=None,\n regex=False,\n method=\"pad\",\n axis=None,\n ):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def resample(\n self,\n rule,\n how=None,\n axis=0,\n fill_method=None,\n closed=None,\n label=None,\n convention=\"start\",\n kind=None,\n loffset=None,\n limit=None,\n base=0,\n on=None,\n level=None,\n ):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def reset_index(self, level=None, drop=False, name=None, inplace=False):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def reshape(self, *args, **kwargs):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def rfloordiv(self, other, level=None, fill_value=None, axis=0):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def rmod(self, other, level=None, fill_value=None, axis=0):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def rmul(self, other, level=None, fill_value=None, axis=0):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def rolling(\n self,\n window,\n min_periods=None,\n freq=None,\n center=False,\n win_type=None,\n on=None,\n axis=0,\n closed=None,\n ):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def round(self, decimals=0, *args, **kwargs):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def rpow(self, other, level=None, fill_value=None, axis=0):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def rsub(self, other, level=None, fill_value=None, axis=0):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def rtruediv(self, other, level=None, fill_value=None, axis=0):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def sample(\n self,\n n=None,\n frac=None,\n replace=False,\n weights=None,\n random_state=None,\n axis=None,\n ):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def searchsorted(self, value, side=\"left\", sorter=None):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def select(self, crit, axis=0):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def sem(\n self, axis=None, skipna=None, level=None, ddof=1, numeric_only=None, **kwargs\n ):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def set_axis(self, axis, labels):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def set_value(self, label, value, takeable=False):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def shift(self, 
periods=1, freq=None, axis=0):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def skew(self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def slice_shift(self, periods=1, axis=0):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def sort_index(\n self,\n axis=0,\n level=None,\n ascending=True,\n inplace=False,\n kind=\"quicksort\",\n na_position=\"last\",\n sort_remaining=True,\n ):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def sort_values(\n self,\n axis=0,\n ascending=True,\n inplace=False,\n kind=\"quicksort\",\n na_position=\"last\",\n ):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def sortlevel(self, level=0, ascending=True, sort_remaining=True):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def squeeze(self, axis=None):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def std(\n self, axis=None, skipna=None, level=None, ddof=1, numeric_only=None, **kwargs\n ):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def sub(self, other, level=None, fill_value=None, axis=0):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def subtract(self, other, level=None, fill_value=None, axis=0):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def sum(self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def swapaxes(self, axis1, axis2, copy=True):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def swaplevel(self, i=-2, j=-1, copy=True):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def tail(self, n=5):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def take(self, indices, axis=0, convert=True, is_copy=False, **kwargs):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def to_clipboard(self, excel=None, sep=None, **kwargs):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def to_csv(\n self,\n path=None,\n index=True,\n sep=\",\",\n na_rep=\"\",\n float_format=None,\n header=False,\n index_label=None,\n mode=\"w\",\n encoding=None,\n date_format=None,\n decimal=\".\",\n ):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def to_dense(self):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def to_dict(self):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def to_excel(\n self,\n excel_writer,\n sheet_name=\"Sheet1\",\n na_rep=\"\",\n float_format=None,\n columns=None,\n header=True,\n index=True,\n index_label=None,\n startrow=0,\n startcol=0,\n engine=None,\n merge_cells=True,\n encoding=None,\n inf_rep=\"inf\",\n verbose=True,\n ):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def to_frame(self, name=None):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def to_hdf(self, path_or_buf, key, **kwargs):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def to_json(\n self,\n path_or_buf=None,\n orient=None,\n date_format=None,\n double_precision=10,\n force_ascii=True,\n date_unit=\"ms\",\n default_handler=None,\n lines=False,\n ):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def to_latex(\n self,\n buf=None,\n columns=None,\n col_space=None,\n header=True,\n index=True,\n na_rep=\"NaN\",\n formatters=None,\n float_format=None,\n sparsify=None,\n index_names=True,\n bold_rows=False,\n column_format=None,\n longtable=None,\n escape=None,\n encoding=None,\n decimal=\".\",\n multicolumn=None,\n multicolumn_format=None,\n multirow=None,\n 
):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def to_msgpack(self, path_or_buf=None, encoding=\"utf-8\", **kwargs):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def to_period(self, freq=None, copy=True):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def to_pickle(self, path, compression=\"infer\"):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def to_sparse(self, kind=\"block\", fill_value=None):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def to_sql(\n self,\n name,\n con,\n flavor=None,\n schema=None,\n if_exists=\"fail\",\n index=True,\n index_label=None,\n chunksize=None,\n dtype=None,\n ):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def to_string(\n self,\n buf=None,\n na_rep=\"NaN\",\n float_format=None,\n header=True,\n index=True,\n length=False,\n dtype=False,\n name=False,\n max_rows=None,\n ):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def to_timestamp(self, freq=None, how=\"start\", copy=True):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def to_xarray(self):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def tolist(self):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def transform(self, func, *args, **kwargs):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def transpose(self, *args, **kwargs):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def truediv(self, other, level=None, fill_value=None, axis=0):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def truncate(self, before=None, after=None, axis=None, copy=True):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def tshift(self, periods=1, freq=None, axis=0):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def tz_convert(self, tz, axis=0, level=None, copy=True):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def tz_localize(self, tz, axis=0, level=None, copy=True, ambiguous=\"raise\"):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def unique(self):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def unstack(self, level=-1, fill_value=None):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def upandasate(self, other):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def valid(self, inplace=False, **kwargs):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def value_counts(\n self, normalize=False, sort=True, ascending=False, bins=None, dropna=True\n ):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def var(\n self, axis=None, skipna=None, level=None, ddof=1, numeric_only=None, **kwargs\n ):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def view(self, dtype=None):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def where(\n self,\n cond,\n other=np.nan,\n inplace=False,\n axis=None,\n level=None,\n try_cast=False,\n raise_on_error=True,\n ):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def xs(key, axis=0, level=None, drop_level=True):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n @property\n def asobject(self):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n @property\n def axes(self):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n @property\n def base(self):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n @property\n def blocks(self):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n @property\n def data(self):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n @property\n def 
dtype(self):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n @property\n def dtypes(self):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n @property\n def empty(self):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n @property\n def flags(self):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n @property\n def ftype(self):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n @property\n def ftypes(self):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n @property\n def hasnans(self):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n @property\n def imag(self):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n @property\n def index(self):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n @property\n def is_copy(self):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n @property\n def is_monotonic(self):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n @property\n def is_monotonic_decreasing(self):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n @property\n def is_monotonic_increasing(self):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n @property\n def is_unique(self):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n @property\n def itemsize(self):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n @property\n def name(self):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n @property\n def nbytes(self):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n @property\n def ndim(self):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n @property\n def real(self):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n @property\n def shape(self):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n @property\n def size(self):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n @property\n def strides(self):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n @property\n def values(self):\n raise NotImplementedError(\"Not Yet implemented.\")\n",
"path": "modin/pandas/series.py"
}
] | [
{
"content": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport pandas\nimport inspect\nimport numpy as np\n\n\n# from .utils import _inherit_docstrings\n\n\ndef na_op():\n \"\"\"Pandas uses a similar function to handle na values.\n \"\"\"\n raise NotImplementedError(\"Not Yet implemented.\")\n\n\nclass SeriesView(object):\n \"\"\"A wrapper class for pandas Series.\n\n Note: The main use of this class is to help us implement inplace operations that\n propagate their changes back to the DataFrame that a Series belongs to. We are\n only need to use this object when `__getitem__` returns a pandas Series, or when\n `loc`/`iloc` return a Series as well.\n\n Important: This is not needed to replace every Series in Modin. For example, when an\n operation on a Series returns a new Series, it does not need to return an object\n of this class. It can return a Series because the new object does not have a\n DataFrame that it is associated with.\n\n \"\"\"\n\n def __init__(self, series, parent_df, loc):\n assert type(series) is pandas.Series\n from .dataframe import DataFrame\n\n assert type(parent_df) is DataFrame\n assert type(loc) is tuple\n self.series = series\n self.parent_df = parent_df\n self._loc = loc\n\n def __repr__(self):\n return repr(self.series)\n\n def __str__(self):\n return str(self.series)\n\n def __comparisons__(self, func):\n def compare_func(other):\n if hasattr(other, \"series\"):\n other = other.series\n return getattr(self.series, func)(other)\n\n return compare_func\n\n def __eq__(self, other):\n return self.__comparisons__(\"__eq__\")(other)\n\n def __ge__(self, other):\n return self.__comparisons__(\"__ge__\")(other)\n\n def __gt__(self, other):\n return self.__comparisons__(\"__gt__\")(other)\n\n def __le__(self, other):\n return self.__comparisons__(\"__le__\")(other)\n\n def __lt__(self, other):\n return self.__comparisons__(\"__lt__\")(other)\n\n def __ne__(self, other):\n return self.__comparisons__(\"__ne__\")(other)\n\n def __arithmetic_op__(self, func):\n def arithemtic_op(other):\n if hasattr(other, \"series\"):\n other = other.series\n return getattr(self.series, func)(other)\n\n return arithemtic_op\n\n def __add__(self, other):\n return self.__arithmetic_op__(\"__add__\")(other)\n\n def __mul__(self, other):\n return self.__arithmetic_op__(\"__mul__\")(other)\n\n def __sub__(self, other):\n return self.__arithmetic_op__(\"__sub__\")(other)\n\n def __truediv__(self, other):\n return self.__arithmetic_op__(\"__truediv__\")(other)\n\n def __floordiv__(self, other):\n return self.__arithmetic_op__(\"__floordiv__\")(other)\n\n def __mod__(self, other):\n return self.__arithmetic_op__(\"__mod__\")(other)\n\n def __pow__(self, other):\n return self.__arithmetic_op__(\"__pow__\")(other)\n\n def __radd__(self, other):\n return self.__arithmetic_op__(\"__radd__\")(other)\n\n def __rmul__(self, other):\n return self.__arithmetic_op__(\"__rmul__\")(other)\n\n def __rsub__(self, other):\n return self.__arithmetic_op__(\"__rsub__\")(other)\n\n def __rtruediv__(self, other):\n return self.__arithmetic_op__(\"__rtruediv__\")(other)\n\n def __rfloordiv__(self, other):\n return self.__arithmetic_op__(\"__rfloordiv__\")(other)\n\n def __rmod__(self, other):\n return self.__arithmetic_op__(\"__rmod__\")(other)\n\n def __rpow__(self, other):\n return self.__arithmetic_op__(\"__rpow__\")(other)\n\n def __iadd__(self, other):\n return self.__arithmetic_op__(\"__iadd__\")(other)\n\n def __imul__(self, other):\n 
return self.__arithmetic_op__(\"__imul__\")(other)\n\n def __isub__(self, other):\n return self.__arithmetic_op__(\"__isub__\")(other)\n\n def __itruediv__(self, other):\n return self.__arithmetic_op__(\"__itruediv__\")(other)\n\n def __ifloordiv__(self, other):\n return self.__arithmetic_op__(\"__ifloordiv__\")(other)\n\n def __imod__(self, other):\n return self.__arithmetic_op__(\"__imod__\")(other)\n\n def __ipow__(self, other):\n return self.__arithmetic_op__(\"__ipow__\")(other)\n\n def __neg__(self, other):\n return self.__arithmetic_op__(\"__neg__\")(other)\n\n def __abs__(self):\n return self.series.abs()\n\n def __iter__(self):\n return self.series.__iter__()\n\n def __len__(self):\n return self.series.__len__()\n\n def __getitem__(self, item):\n return self.series.__getitem__(item)\n\n def __setitem__(self, key, value):\n return_val = self.series.__setitem__(key, value)\n self.parent_df.loc[self._loc] = self.series\n return return_val\n\n def __getattribute__(self, item):\n default_behaviors = [\n \"__init__\",\n \"series\",\n \"parent_df\",\n \"_loc\",\n \"__arithmetic_op__\",\n \"__comparisons__\",\n \"__class__\",\n ]\n if item not in default_behaviors:\n method = self.series.__getattribute__(item)\n # Certain operations like `at`, `loc`, `iloc`, etc. are callable because in\n # pandas they are equivalent to classes. They are verified here because they\n # cannot be overridden with the functions below. This generally solves the\n # problem where the instance property is callable, but the class property is\n # not.\n is_callable = callable(method) and callable(\n getattr(type(self.series), item)\n )\n try:\n has_inplace_param = is_callable and \"inplace\" in str(\n inspect.signature(method)\n )\n # This will occur on Python2\n except AttributeError:\n has_inplace_param = is_callable and \"inplace\" in str(\n inspect.getargspec(method)\n )\n\n if is_callable and has_inplace_param and self.parent_df is not None:\n\n def inplace_handler(*args, **kwargs):\n \"\"\"Replaces the default behavior of methods with inplace kwarg.\n\n Note: This method will modify the DataFrame this Series is attached\n to when `inplace` is True. Instead of rewriting or overriding\n every method that uses `inplace`, we use this handler.\n\n This handler will first check that the keyword argument passed\n for `inplace` is True, if not then it will just return the\n result of the operation requested.\n\n If `inplace` is True, do the operation, keeping track of the\n previous length. This is because operations like `dropna` still\n propagate back to the DataFrame that holds the Series.\n\n If the length did not change, we propagate the inplace changes\n of the operation back to the original DataFrame with\n `__setitem__`.\n\n If the length changed, we just need to do a `reindex` on the\n parent DataFrame. This will propagate the inplace operation\n (e.g. 
`dropna`) back to the parent DataFrame.\n\n See notes in SeriesView class about when it is okay to return a\n pandas Series vs a SeriesView.\n\n Returns:\n If `inplace` is True: None, else: A new Series.\n \"\"\"\n if kwargs.get(\"inplace\", False):\n prev_len = len(self.series)\n self.series.__getattribute__(item)(*args, **kwargs)\n if prev_len == len(self.series):\n self.parent_df.loc[self._loc] = self.series\n else:\n self.parent_df.reindex(index=self.series.index, copy=False)\n return None\n else:\n return self.series.__getattribute__(item)(*args, **kwargs)\n\n # We replace the method with `inplace_handler` for inplace operations\n method = inplace_handler\n elif is_callable:\n\n def other_handler(*args, **kwargs):\n \"\"\"Replaces the method's args and kwargs with the Series object.\n\n Note: This method is needed because sometimes operations like\n `df['col0'].equals(df['col1'])` do not return the correct value.\n This mostly has occurred in Python2, but overriding of the\n method will make the behavior more deterministic for all calls.\n\n Returns the result of `__getattribute__` from the Series this wraps.\n \"\"\"\n args = tuple(\n arg if not isinstance(arg, SeriesView) else arg.series\n for arg in args\n )\n kwargs = {\n kw: arg if not isinstance(arg, SeriesView) else arg.series\n for kw, arg in kwargs.items()\n }\n return self.series.__getattribute__(item)(*args, **kwargs)\n\n method = other_handler\n return method\n # We need to do this hack for equality checking.\n elif item == \"__class__\":\n return self.series.__class__\n else:\n return object.__getattribute__(self, item)\n\n\nclass Series(object):\n def __init__(self, series_oids):\n \"\"\"Constructor for a Series object.\n\n Args:\n series_oids ([ObjectID]): The list of remote Series objects.\n \"\"\"\n self.series_oids = series_oids\n\n @property\n def T(self):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def __abs__(self):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def __add__(self, right, name=\"__add__\", na_op=na_op):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def __and__(self, other):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def __array__(self, result=None):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def __array_prepare__(self, result, context=None):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n @property\n def __array_priority__(self):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def __array_wrap__(self, result, context=None):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def __bool__(self):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def __bytes__(self):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def __class__(\n self, data=None, index=None, dtype=None, name=None, copy=False, fastpath=False\n ):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def __contains__(self, key):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def __copy__(self, deep=True):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def __deepcopy__(self, memo=None):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def __delitem__(self, key):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def __dir__(self):\n return list(type(self).__dict__.keys())\n\n def __div__(self, right, name=\"__truediv__\", na_op=na_op):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def __divmod__(self, right, name=\"__divmod__\", na_op=na_op):\n raise NotImplementedError(\"Not 
Yet implemented.\")\n\n @property\n def __doc__(self):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def __eq__(self, other, axis=None):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def __finalize__(self, other, method=None, **kwargs):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def __float__(self):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def __floordiv__(self, right, name=\"__floordiv__\", na_op=na_op):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def __ge__(self, other, axis=None):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def __getitem__(self, key):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def __getstate__(self):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def __gt__(self, other, axis=None):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def __iadd__(self, other):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def __imul__(self, other):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def __int__(self):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def __invert__(self):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def __ipow__(self, other):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def __isub__(self, other):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def __iter__(self):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def __itruediv__(self, other):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def __le__(self, other, axis=None):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def __len__(self):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def __long__(self):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def __lt__(self, other, axis=None):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def __mod__(self, right, name=\"__mod__\", na_op=na_op):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def __mul__(self, right, name=\"__mul__\", na_op=na_op):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def __ne__(self, other, axis=None):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def __neg__(self):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def __nonzero__(self):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def __or__(self, other):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def __pow__(self, right, name=\"__pow__\", na_op=na_op):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def __repr__(self):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def __round__(self, decimals=0):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def __setitem__(self, key, value):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def __setstate__(self, state):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def __sizeof__(self):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def __str__(self):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def __sub__(self, right, name=\"__sub__\", na_op=na_op):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def __truediv__(self, right, name=\"__truediv__\", na_op=na_op):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def __xor__(self, other):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def abs(self):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def add(self, other, level=None, fill_value=None, axis=0):\n raise 
NotImplementedError(\"Not Yet implemented.\")\n\n def add_prefix(self, prefix):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def add_suffix(self, suffix):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def agg(self, func, axis=0, *args, **kwargs):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def aggregate(self, func, axis=0, *args, **kwargs):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def align(\n self,\n other,\n join=\"outer\",\n axis=None,\n level=None,\n copy=True,\n fill_value=None,\n method=None,\n limit=None,\n fill_axis=0,\n broadcast_axis=None,\n ):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def all(self, axis=None, bool_only=None, skipna=None, level=None, **kwargs):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def any(self, axis=None, bool_only=None, skipna=None, level=None, **kwargs):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def append(self, to_append, ignore_index=False, verify_integrity=False):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def apply(self, func, convert_dtype=True, args=(), **kwds):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def argmax(self, axis=None, skipna=True, *args, **kwargs):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def argmin(self, axis=None, skipna=True, *args, **kwargs):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def argsort(self, axis=0, kind=\"quicksort\", order=None):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def as_blocks(self, copy=True):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def as_matrix(self, columns=None):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def asfreq(self, freq, method=None, how=None, normalize=False, fill_value=None):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def asof(self, where, subset=None):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def astype(self, dtype, copy=True, errors=\"raise\", **kwargs):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def at(self, axis=None):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def at_time(self, time, asof=False):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def autocorr(self, lag=1):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def between(self, left, right, inclusive=True):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def between_time(self, start_time, end_time, include_start=True, include_end=True):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def bfill(self, axis=None, inplace=False, limit=None, downcast=None):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def bool(self):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def clip(self, lower=None, upper=None, axis=None, *args, **kwargs):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def clip_lower(self, threshold, axis=None):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def clip_upper(self, threshold, axis=None):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def combine(self, other, func, fill_value=np.nan):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def combine_first(self, other):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def compound(self, axis=None, skipna=None, level=None):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def compress(self, condition, *args, **kwargs):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def 
consolidate(self, inplace=False):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def convert_objects(\n self,\n convert_dates=True,\n convert_numeric=False,\n convert_timedeltas=True,\n copy=True,\n ):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def copy(self, deep=True):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def corr(self, other, method=\"pearson\", min_periods=None):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def count(self, level=None):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def cov(self, other, min_periods=None):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def cummax(self, axis=None, skipna=True, *args, **kwargs):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def cummin(self, axis=None, skipna=True, *args, **kwargs):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def cumprod(self, axis=None, skipna=True, *args, **kwargs):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def cumsum(self, axis=None, skipna=True, *args, **kwargs):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def describe(self, percentiles=None, include=None, exclude=None):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def diff(self, periods=1):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def div(self, other, level=None, fill_value=None, axis=0):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def divide(self, other, level=None, fill_value=None, axis=0):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def dot(self, other):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def drop(self, labels, axis=0, level=None, inplace=False, errors=\"raise\"):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def drop_duplicates(self, keep=\"first\", inplace=False):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def dropna(self, axis=0, inplace=False, **kwargs):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def duplicated(self, keep=\"first\"):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def eq(self, other, level=None, fill_value=None, axis=0):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def equals(self, other):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def ewm(\n self,\n com=None,\n span=None,\n halflife=None,\n alpha=None,\n min_periods=0,\n freq=None,\n adjust=True,\n ignore_na=False,\n axis=0,\n ):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def expanding(self, min_periods=1, freq=None, center=False, axis=0):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def factorize(self, sort=False, na_sentinel=-1):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def ffill(self, axis=None, inplace=False, limit=None, downcast=None):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def fillna(\n self,\n value=None,\n method=None,\n axis=None,\n inplace=False,\n limit=None,\n downcast=None,\n **kwargs\n ):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def filter(self, items=None, like=None, regex=None, axis=None):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def first(self, offset):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def first_valid_index(self):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def floordiv(self, other, level=None, fill_value=None, axis=0):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def from_array(\n self, arr, index=None, name=None, dtype=None, 
copy=False, fastpath=False\n ):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def from_csv(\n self,\n path,\n sep=\",\",\n parse_dates=True,\n header=None,\n index_col=0,\n encoding=None,\n infer_datetime_format=False,\n ):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def ge(self, other, level=None, fill_value=None, axis=0):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def get(self, key, default=None):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def get_dtype_counts(self):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def get_ftype_counts(self):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def get_value(self, label, takeable=False):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def get_values(self):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def groupby(\n self,\n by=None,\n axis=0,\n level=None,\n as_index=True,\n sort=True,\n group_keys=True,\n squeeze=False,\n **kwargs\n ):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def gt(self, other, level=None, fill_value=None, axis=0):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def head(self, n=5):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def hist(\n self,\n by=None,\n ax=None,\n grid=True,\n xlabelsize=None,\n xrot=None,\n ylabelsize=None,\n yrot=None,\n figsize=None,\n bins=10,\n **kwds\n ):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def iat(self, axis=None):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def idxmax(self, axis=None, skipna=True, *args, **kwargs):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def idxmin(self, axis=None, skipna=True, *args, **kwargs):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def iloc(self, axis=None):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def interpolate(\n self,\n method=\"linear\",\n axis=0,\n limit=None,\n inplace=False,\n limit_direction=\"forward\",\n downcast=None,\n **kwargs\n ):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def isin(self, values):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def isnull(self):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def item(self):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def items(self):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def iteritems(self):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def ix(self, axis=None):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def keys(self):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def kurt(self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def kurtosis(self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def last(self, offset):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def last_valid_index(self):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def le(self, other, level=None, fill_value=None, axis=0):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def loc(self, axis=None):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def lt(self, other, level=None, fill_value=None, axis=0):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def mad(self, axis=None, skipna=None, level=None):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def map(self, arg, na_action=None):\n raise 
NotImplementedError(\"Not Yet implemented.\")\n\n def mask(\n self,\n cond,\n other=np.nan,\n inplace=False,\n axis=None,\n level=None,\n try_cast=False,\n raise_on_error=True,\n ):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def max(self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def mean(self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def median(self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def memory_usage(self, index=True, deep=False):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def min(self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def mod(self, other, level=None, fill_value=None, axis=0):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def mode(self):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def mul(self, other, level=None, fill_value=None, axis=0):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def multiply(self, other, level=None, fill_value=None, axis=0):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def ne(self, other, level=None, fill_value=None, axis=0):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def nlargest(self, n=5, keep=\"first\"):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def nonzero(self):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def notnull(self):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def nsmallest(self, n=5, keep=\"first\"):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def nunique(self, dropna=True):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def pct_change(self, periods=1, fill_method=\"pad\", limit=None, freq=None, **kwargs):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def pipe(self, func, *args, **kwargs):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def plot(\n self,\n kind=\"line\",\n ax=None,\n figsize=None,\n use_index=True,\n title=None,\n grid=None,\n legend=False,\n style=None,\n logx=False,\n logy=False,\n loglog=False,\n xticks=None,\n yticks=None,\n xlim=None,\n ylim=None,\n rot=None,\n fontsize=None,\n colormap=None,\n table=False,\n yerr=None,\n xerr=None,\n label=None,\n secondary_y=False,\n **kwds\n ):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def pop(self, item):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def pow(self, other, level=None, fill_value=None, axis=0):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def prod(self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def product(self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def ptp(self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def put(self, *args, **kwargs):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def quantile(self, q=0.5, interpolation=\"linear\"):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def radd(self, other, level=None, fill_value=None, axis=0):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def rank(\n self,\n axis=0,\n method=\"average\",\n 
numeric_only=None,\n na_option=\"keep\",\n ascending=True,\n pct=False,\n ):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def ravel(self, order=\"C\"):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def rdiv(self, other, level=None, fill_value=None, axis=0):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def reindex(self, index=None, **kwargs):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def reindex_axis(self, labels, axis=0, **kwargs):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def reindex_like(self, other, method=None, copy=True, limit=None, tolerance=None):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def rename(self, index=None, **kwargs):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def rename_axis(self, mapper, axis=0, copy=True, inplace=False):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def reorder_levels(self, order):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def repeat(self, repeats, *args, **kwargs):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def replace(\n self,\n to_replace=None,\n value=None,\n inplace=False,\n limit=None,\n regex=False,\n method=\"pad\",\n axis=None,\n ):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def resample(\n self,\n rule,\n how=None,\n axis=0,\n fill_method=None,\n closed=None,\n label=None,\n convention=\"start\",\n kind=None,\n loffset=None,\n limit=None,\n base=0,\n on=None,\n level=None,\n ):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def reset_index(self, level=None, drop=False, name=None, inplace=False):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def reshape(self, *args, **kwargs):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def rfloordiv(self, other, level=None, fill_value=None, axis=0):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def rmod(self, other, level=None, fill_value=None, axis=0):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def rmul(self, other, level=None, fill_value=None, axis=0):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def rolling(\n self,\n window,\n min_periods=None,\n freq=None,\n center=False,\n win_type=None,\n on=None,\n axis=0,\n closed=None,\n ):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def round(self, decimals=0, *args, **kwargs):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def rpow(self, other, level=None, fill_value=None, axis=0):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def rsub(self, other, level=None, fill_value=None, axis=0):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def rtruediv(self, other, level=None, fill_value=None, axis=0):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def sample(\n self,\n n=None,\n frac=None,\n replace=False,\n weights=None,\n random_state=None,\n axis=None,\n ):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def searchsorted(self, value, side=\"left\", sorter=None):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def select(self, crit, axis=0):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def sem(\n self, axis=None, skipna=None, level=None, ddof=1, numeric_only=None, **kwargs\n ):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def set_axis(self, axis, labels):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def set_value(self, label, value, takeable=False):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def shift(self, 
periods=1, freq=None, axis=0):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def skew(self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def slice_shift(self, periods=1, axis=0):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def sort_index(\n self,\n axis=0,\n level=None,\n ascending=True,\n inplace=False,\n kind=\"quicksort\",\n na_position=\"last\",\n sort_remaining=True,\n ):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def sort_values(\n self,\n axis=0,\n ascending=True,\n inplace=False,\n kind=\"quicksort\",\n na_position=\"last\",\n ):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def sortlevel(self, level=0, ascending=True, sort_remaining=True):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def squeeze(self, axis=None):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def std(\n self, axis=None, skipna=None, level=None, ddof=1, numeric_only=None, **kwargs\n ):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def sub(self, other, level=None, fill_value=None, axis=0):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def subtract(self, other, level=None, fill_value=None, axis=0):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def sum(self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def swapaxes(self, axis1, axis2, copy=True):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def swaplevel(self, i=-2, j=-1, copy=True):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def tail(self, n=5):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def take(self, indices, axis=0, convert=True, is_copy=False, **kwargs):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def to_clipboard(self, excel=None, sep=None, **kwargs):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def to_csv(\n self,\n path=None,\n index=True,\n sep=\",\",\n na_rep=\"\",\n float_format=None,\n header=False,\n index_label=None,\n mode=\"w\",\n encoding=None,\n date_format=None,\n decimal=\".\",\n ):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def to_dense(self):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def to_dict(self):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def to_excel(\n self,\n excel_writer,\n sheet_name=\"Sheet1\",\n na_rep=\"\",\n float_format=None,\n columns=None,\n header=True,\n index=True,\n index_label=None,\n startrow=0,\n startcol=0,\n engine=None,\n merge_cells=True,\n encoding=None,\n inf_rep=\"inf\",\n verbose=True,\n ):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def to_frame(self, name=None):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def to_hdf(self, path_or_buf, key, **kwargs):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def to_json(\n self,\n path_or_buf=None,\n orient=None,\n date_format=None,\n double_precision=10,\n force_ascii=True,\n date_unit=\"ms\",\n default_handler=None,\n lines=False,\n ):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def to_latex(\n self,\n buf=None,\n columns=None,\n col_space=None,\n header=True,\n index=True,\n na_rep=\"NaN\",\n formatters=None,\n float_format=None,\n sparsify=None,\n index_names=True,\n bold_rows=False,\n column_format=None,\n longtable=None,\n escape=None,\n encoding=None,\n decimal=\".\",\n multicolumn=None,\n multicolumn_format=None,\n multirow=None,\n 
):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def to_msgpack(self, path_or_buf=None, encoding=\"utf-8\", **kwargs):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def to_period(self, freq=None, copy=True):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def to_pickle(self, path, compression=\"infer\"):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def to_sparse(self, kind=\"block\", fill_value=None):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def to_sql(\n self,\n name,\n con,\n flavor=None,\n schema=None,\n if_exists=\"fail\",\n index=True,\n index_label=None,\n chunksize=None,\n dtype=None,\n ):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def to_string(\n self,\n buf=None,\n na_rep=\"NaN\",\n float_format=None,\n header=True,\n index=True,\n length=False,\n dtype=False,\n name=False,\n max_rows=None,\n ):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def to_timestamp(self, freq=None, how=\"start\", copy=True):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def to_xarray(self):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def tolist(self):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def transform(self, func, *args, **kwargs):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def transpose(self, *args, **kwargs):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def truediv(self, other, level=None, fill_value=None, axis=0):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def truncate(self, before=None, after=None, axis=None, copy=True):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def tshift(self, periods=1, freq=None, axis=0):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def tz_convert(self, tz, axis=0, level=None, copy=True):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def tz_localize(self, tz, axis=0, level=None, copy=True, ambiguous=\"raise\"):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def unique(self):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def unstack(self, level=-1, fill_value=None):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def upandasate(self, other):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def valid(self, inplace=False, **kwargs):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def value_counts(\n self, normalize=False, sort=True, ascending=False, bins=None, dropna=True\n ):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def var(\n self, axis=None, skipna=None, level=None, ddof=1, numeric_only=None, **kwargs\n ):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def view(self, dtype=None):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def where(\n self,\n cond,\n other=np.nan,\n inplace=False,\n axis=None,\n level=None,\n try_cast=False,\n raise_on_error=True,\n ):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n def xs(key, axis=0, level=None, drop_level=True):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n @property\n def asobject(self):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n @property\n def axes(self):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n @property\n def base(self):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n @property\n def blocks(self):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n @property\n def data(self):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n @property\n def 
dtype(self):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n @property\n def dtypes(self):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n @property\n def empty(self):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n @property\n def flags(self):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n @property\n def ftype(self):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n @property\n def ftypes(self):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n @property\n def hasnans(self):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n @property\n def imag(self):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n @property\n def index(self):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n @property\n def is_copy(self):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n @property\n def is_monotonic(self):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n @property\n def is_monotonic_decreasing(self):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n @property\n def is_monotonic_increasing(self):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n @property\n def is_unique(self):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n @property\n def itemsize(self):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n @property\n def name(self):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n @property\n def nbytes(self):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n @property\n def ndim(self):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n @property\n def real(self):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n @property\n def shape(self):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n @property\n def size(self):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n @property\n def strides(self):\n raise NotImplementedError(\"Not Yet implemented.\")\n\n @property\n def values(self):\n raise NotImplementedError(\"Not Yet implemented.\")\n",
"path": "modin/pandas/series.py"
}
] | diff --git a/modin/pandas/series.py b/modin/pandas/series.py
index e983dfb1a23..c32996af65f 100644
--- a/modin/pandas/series.py
+++ b/modin/pandas/series.py
@@ -147,6 +147,9 @@ def __ipow__(self, other):
def __neg__(self, other):
return self.__arithmetic_op__("__neg__")(other)
+ def __abs__(self):
+ return self.series.abs()
+
def __iter__(self):
return self.series.__iter__()
|
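The `__abs__` override added in the diff above is needed because Python resolves special methods on the type, not the instance, so `SeriesView`'s attribute forwarding in `__getattribute__` never fires for the built-in `abs()`. A minimal standalone sketch of the delegation pattern (the `Wrapper` class here is illustrative, not modin's actual implementation):

```python
# Standalone sketch of the dunder-delegation pattern (illustrative only).
import pandas


class Wrapper(object):
    """Wraps a pandas Series; abs() needs an explicit __abs__ on the class."""

    def __init__(self, series):
        self.series = series

    def __abs__(self):
        # Forward to the wrapped Series, as the diff above does.
        return self.series.abs()


print(abs(Wrapper(pandas.Series([-1, 2, -3]))))  # 1, 2, 3
```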
CiviWiki__OpenCiviWiki-1060 | Move user/account-related templates out of `threads` app
### Idea summary
There are several user- and account-related templates in the `threads` app. They should reside in the `accounts` app instead.
### Further details
Move all of the following templates from the `threads` app to the `accounts` app:
- [ ] `threads/templates/threads/base/less_headers/account_less.html` -> `accounts/templates/accounts/base/less_headers/account_less.html`
- [ ] `threads/templates/threads/base/less_headers/login_less.html` -> `accounts/templates/accounts/base/less_headers/login_less.html`
- [ ] `threads/templates/threads/partials/account/*` -> `accounts/templates/accounts/partials/account/*`
- [ ] `threads/templates/threads/partials/feed/*` -> `accounts/templates/accounts/partials/feed/*`
- [ ] `threads/templates/threads/partials/login/*` -> `accounts/templates/accounts/partials/login/*`
- [ ] `threads/templates/threads/partials/user-setup/*` -> `accounts/templates/accounts/partials/user-setup/*`
- [ ] `threads/templates/threads/user/*` -> `accounts/templates/accounts/*`
- [ ] `threads/templates/threads/account.html` -> `accounts/templates/accounts/account.html`
- [ ] `threads/templates/threads/feed.html` -> `accounts/templates/accounts/feed.html`
- [ ] `threads/templates/threads/invite.html` -> `accounts/templates/accounts/invite.html`
- [ ] `threads/templates/threads/user-setup.html` -> `accounts/templates/accounts/user-setup.html`
- [ ] make sure to fix all imports and template path references affected by the moved files (see the sketch below)
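
For example, any view that renders one of the moved templates needs its template path updated so Django's app-directories loader resolves it from `accounts/templates/`. A minimal sketch of the kind of change involved — the function name here is illustrative; the actual change in this PR lands in `ProfileSetupView`:

```python
# Hypothetical function-based view -- illustrates the path fix only.
from django.template.response import TemplateResponse


def user_setup(request):
    data = {
        "username": request.user.username,
        "email": request.user.email,
    }
    # Before the move, the template was referenced as "user-setup.html";
    # after the move it must be referenced as "accounts/user-setup.html".
    return TemplateResponse(request, "accounts/user-setup.html", data)
```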
| [
{
"content": "\"\"\"\nClass based views.\n\nThis module will include views for the accounts app.\n\"\"\"\n\nfrom django.conf import settings\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.views.generic.edit import FormView, UpdateView\nfrom django.views import View\nfrom django.contrib.auth import views as auth_views\nfrom django.contrib.auth import login\nfrom django.contrib.sites.shortcuts import get_current_site\nfrom django.urls import reverse_lazy\nfrom django.contrib.auth import get_user_model\nfrom django.utils.encoding import force_str\nfrom django.utils.http import urlsafe_base64_decode\nfrom django.template.response import TemplateResponse\nfrom accounts.models import Profile\nfrom accounts.forms import UserRegistrationForm, ProfileEditForm\nfrom accounts.authentication import send_activation_email, account_activation_token\nfrom django.http import HttpResponseRedirect\n\n\nclass RegisterView(FormView):\n \"\"\"\n A form view that handles user registration.\n \"\"\"\n\n template_name = \"accounts/register/register.html\"\n form_class = UserRegistrationForm\n success_url = \"/\"\n\n def _create_user(self, form):\n username = form.cleaned_data[\"username\"]\n password = form.cleaned_data[\"password\"]\n email = form.cleaned_data[\"email\"]\n\n user = get_user_model().objects.create_user(username, email, password)\n Profile.objects.create(user=user)\n\n return user\n\n def _send_email(self, user):\n domain = get_current_site(self.request).domain\n send_activation_email(user, domain)\n\n def _login(self, user):\n login(self.request, user)\n\n def form_valid(self, form):\n user = self._create_user(form)\n\n self._send_email(user)\n self._login(user)\n\n return super(RegisterView, self).form_valid(form)\n\n\nclass PasswordResetView(auth_views.PasswordResetView):\n template_name = \"accounts/users/password_reset.html\"\n email_template_name = \"accounts/users/password_reset_email.html\"\n subject_template_name = \"accounts/users/password_reset_subject.txt\"\n from_email = settings.EMAIL_HOST_USER\n success_url = reverse_lazy(\"accounts_password_reset_done\")\n\n\nclass PasswordResetDoneView(auth_views.PasswordResetDoneView):\n template_name = \"accounts/users/password_reset_done.html\"\n\n\nclass PasswordResetConfirmView(auth_views.PasswordResetConfirmView):\n template_name = \"accounts/users/password_reset_confirm.html\"\n success_url = reverse_lazy(\"accounts_password_reset_complete\")\n\n\nclass PasswordResetCompleteView(auth_views.PasswordResetCompleteView):\n template_name = \"accounts/users/password_reset_complete.html\"\n\n\nclass SettingsView(LoginRequiredMixin, UpdateView):\n \"\"\"A form view to edit Profile\"\"\"\n\n login_url = 'accounts_login'\n form_class = ProfileEditForm\n success_url = reverse_lazy('accounts_settings')\n template_name = 'accounts/utils/update_settings.html'\n\n def get_object(self, queryset=None):\n return Profile.objects.get(user=self.request.user)\n\n def get_initial(self):\n profile = Profile.objects.get(user=self.request.user)\n self.initial.update({\n \"username\": profile.user.username,\n \"email\": profile.user.email,\n \"first_name\": profile.first_name or None,\n \"last_name\": profile.last_name or None,\n \"about_me\": profile.about_me or None,\n })\n return super(SettingsView, self).get_initial()\n\n\nclass ProfileActivationView(View):\n \"\"\"\n This shows different views to the user when they are verifying\n their account based on whether they are already verified or not.\n \"\"\"\n\n def get(self, request, uidb64, 
token):\n\n User = get_user_model()\n try:\n uid = force_str(urlsafe_base64_decode(uidb64))\n user = User.objects.get(pk=uid)\n\n except (TypeError, ValueError, OverflowError, User.DoesNotExist):\n user = None\n\n if user is not None and account_activation_token.check_token(user, token):\n profile = Profile.objects.get(user=user)\n if profile.is_verified:\n redirect_link = {\"href\": \"/\", \"label\": \"Back to Main\"}\n template_var = {\n \"title\": \"Email Already Verified\",\n \"content\": \"You have already verified your email\",\n \"link\": redirect_link,\n }\n return TemplateResponse(request, \"general-message.html\", template_var)\n else:\n profile.is_verified = True\n profile.save()\n\n redirect_link = {\"href\": \"/\", \"label\": \"Back to Main\"}\n template_var = {\n \"title\": \"Email Verification Successful\",\n \"content\": \"Thank you for verifying your email with CiviWiki\",\n \"link\": redirect_link,\n }\n return TemplateResponse(request, \"general-message.html\", template_var)\n else:\n # invalid link\n redirect_link = {\"href\": \"/\", \"label\": \"Back to Main\"}\n template_var = {\n \"title\": \"Email Verification Error\",\n \"content\": \"Email could not be verified\",\n \"link\": redirect_link,\n }\n return TemplateResponse(request, \"general-message.html\", template_var)\n\n\nclass ProfileSetupView(LoginRequiredMixin, View):\n \"\"\"A view to make the user profile full_profile\"\"\"\n\n login_url = 'accounts_login'\n\n def get(self, request):\n profile = Profile.objects.get(user=request.user)\n if profile.full_profile:\n return HttpResponseRedirect(\"/\")\n # start temp rep rendering TODO: REMOVE THIS\n else:\n data = {\n \"username\": request.user.username,\n \"email\": request.user.email,\n }\n return TemplateResponse(request, \"user-setup.html\", data)\n",
"path": "project/accounts/views.py"
}
] | [
{
"content": "\"\"\"\nClass based views.\n\nThis module will include views for the accounts app.\n\"\"\"\n\nfrom django.conf import settings\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.views.generic.edit import FormView, UpdateView\nfrom django.views import View\nfrom django.contrib.auth import views as auth_views\nfrom django.contrib.auth import login\nfrom django.contrib.sites.shortcuts import get_current_site\nfrom django.urls import reverse_lazy\nfrom django.contrib.auth import get_user_model\nfrom django.utils.encoding import force_str\nfrom django.utils.http import urlsafe_base64_decode\nfrom django.template.response import TemplateResponse\nfrom accounts.models import Profile\nfrom accounts.forms import UserRegistrationForm, ProfileEditForm\nfrom accounts.authentication import send_activation_email, account_activation_token\nfrom django.http import HttpResponseRedirect\n\n\nclass RegisterView(FormView):\n \"\"\"\n A form view that handles user registration.\n \"\"\"\n\n template_name = \"accounts/register/register.html\"\n form_class = UserRegistrationForm\n success_url = \"/\"\n\n def _create_user(self, form):\n username = form.cleaned_data[\"username\"]\n password = form.cleaned_data[\"password\"]\n email = form.cleaned_data[\"email\"]\n\n user = get_user_model().objects.create_user(username, email, password)\n Profile.objects.create(user=user)\n\n return user\n\n def _send_email(self, user):\n domain = get_current_site(self.request).domain\n send_activation_email(user, domain)\n\n def _login(self, user):\n login(self.request, user)\n\n def form_valid(self, form):\n user = self._create_user(form)\n\n self._send_email(user)\n self._login(user)\n\n return super(RegisterView, self).form_valid(form)\n\n\nclass PasswordResetView(auth_views.PasswordResetView):\n template_name = \"accounts/users/password_reset.html\"\n email_template_name = \"accounts/users/password_reset_email.html\"\n subject_template_name = \"accounts/users/password_reset_subject.txt\"\n from_email = settings.EMAIL_HOST_USER\n success_url = reverse_lazy(\"accounts_password_reset_done\")\n\n\nclass PasswordResetDoneView(auth_views.PasswordResetDoneView):\n template_name = \"accounts/users/password_reset_done.html\"\n\n\nclass PasswordResetConfirmView(auth_views.PasswordResetConfirmView):\n template_name = \"accounts/users/password_reset_confirm.html\"\n success_url = reverse_lazy(\"accounts_password_reset_complete\")\n\n\nclass PasswordResetCompleteView(auth_views.PasswordResetCompleteView):\n template_name = \"accounts/users/password_reset_complete.html\"\n\n\nclass SettingsView(LoginRequiredMixin, UpdateView):\n \"\"\"A form view to edit Profile\"\"\"\n\n login_url = 'accounts_login'\n form_class = ProfileEditForm\n success_url = reverse_lazy('accounts_settings')\n template_name = 'accounts/utils/update_settings.html'\n\n def get_object(self, queryset=None):\n return Profile.objects.get(user=self.request.user)\n\n def get_initial(self):\n profile = Profile.objects.get(user=self.request.user)\n self.initial.update({\n \"username\": profile.user.username,\n \"email\": profile.user.email,\n \"first_name\": profile.first_name or None,\n \"last_name\": profile.last_name or None,\n \"about_me\": profile.about_me or None,\n })\n return super(SettingsView, self).get_initial()\n\n\nclass ProfileActivationView(View):\n \"\"\"\n This shows different views to the user when they are verifying\n their account based on whether they are already verified or not.\n \"\"\"\n\n def get(self, request, uidb64, 
token):\n\n User = get_user_model()\n try:\n uid = force_str(urlsafe_base64_decode(uidb64))\n user = User.objects.get(pk=uid)\n\n except (TypeError, ValueError, OverflowError, User.DoesNotExist):\n user = None\n\n if user is not None and account_activation_token.check_token(user, token):\n profile = Profile.objects.get(user=user)\n if profile.is_verified:\n redirect_link = {\"href\": \"/\", \"label\": \"Back to Main\"}\n template_var = {\n \"title\": \"Email Already Verified\",\n \"content\": \"You have already verified your email\",\n \"link\": redirect_link,\n }\n return TemplateResponse(request, \"general-message.html\", template_var)\n else:\n profile.is_verified = True\n profile.save()\n\n redirect_link = {\"href\": \"/\", \"label\": \"Back to Main\"}\n template_var = {\n \"title\": \"Email Verification Successful\",\n \"content\": \"Thank you for verifying your email with CiviWiki\",\n \"link\": redirect_link,\n }\n return TemplateResponse(request, \"general-message.html\", template_var)\n else:\n # invalid link\n redirect_link = {\"href\": \"/\", \"label\": \"Back to Main\"}\n template_var = {\n \"title\": \"Email Verification Error\",\n \"content\": \"Email could not be verified\",\n \"link\": redirect_link,\n }\n return TemplateResponse(request, \"general-message.html\", template_var)\n\n\nclass ProfileSetupView(LoginRequiredMixin, View):\n \"\"\"A view to make the user profile full_profile\"\"\"\n\n login_url = 'accounts_login'\n\n def get(self, request):\n profile = Profile.objects.get(user=request.user)\n if profile.full_profile:\n return HttpResponseRedirect(\"/\")\n # start temp rep rendering TODO: REMOVE THIS\n else:\n data = {\n \"username\": request.user.username,\n \"email\": request.user.email,\n }\n return TemplateResponse(request, \"accounts/user-setup.html\", data)\n",
"path": "project/accounts/views.py"
}
] | diff --git a/account.html b/account.html
new file mode 100644
index 000000000..e0350ad79
--- /dev/null
+++ b/account.html
@@ -0,0 +1,84 @@
+<!doctype html>
+
+<head>
+ {% load static %}
+ {% include "base/links.html" %}
+ {% include "accounts/base/less_headers/account_less.html" %}
+</head>
+
+
+<!-- Util Partials -->
+<script id="civi-template" type="text/template">
+ {% include "partials/account/utils/account_civi.html" %}
+</script>
+<script id="user-chip-template" type="text/template">
+ {% include "partials/account/utils/user_chip.html" %}
+</script>
+<script id="user-card-template" type="text/template">
+ {% include "partials/account/utils/user_card.html" %}
+</script>
+<script id="issue-template" type="text/template">
+ {% include "partials/account/utils/account_issue.html" %}
+</script>
+<script id="rep-template" type="text/template">
+ {% include "partials/account/utils/representative_card.html" %}
+</script>
+<script id="rep-chip-template" type="text/template">
+ {% verbatim %}
+ <div class="rep bold-text purple-text chip">
+ <img src="https://theunitedstates.io/images/congress/225x275/{{ rep.bioguide_id }}.jpg" alt="rep image">
+ <span>{{rep.title}} {{rep.first_name}} {{rep.last_name}} </span>
+ </div>
+ {% endverbatim %}
+</script>
+
+<!-- account tab templates -->
+<script id="my-civis-template" type="text/template">
+ {% include "partials/account/tabs/my_civis.html" %}
+</script>
+<script id="followers-template" type="text/template">
+ {% include "partials/account/tabs/followers.html" %}
+</script>
+<script id="following-template" type="text/template">
+ {% include "partials/account/tabs/following.html" %}
+</script>
+<script id="my-issues-template" type="text/template">
+ {% include "partials/account/tabs/my_issues.html" %}
+</script>
+
+
+<!-- account base templates -->
+<script id="account-template" type="text/template">
+ {% include "partials/account/account_base.html" %}
+</script>
+<script id="sidebar-template" type="text/template">
+ {% include "partials/account/account_sidebar.html" %}
+</script>
+<script id="settings-template" type="text/template">
+ {% include "partials/account/account_settings.html" %}
+</script>
+
+
+<body>
+ {% include "partials/utils/global_nav.html" %}
+
+ <div id="account"></div>
+</body>
+
+<script type="text/javascript" src="{% static 'js/account.js' %}"></script>
+
+<script type="text/javascript">
+
+ var username = '{{username}}';
+    var current_user = '{{request.user.username}}';
+ var accountModel = new cw.AccountModel({}, {
+ user: username
+ });
+
+ var accountView = new cw.AccountView({
+ model: accountModel,
+ current_user: current_user,
+ });
+ accountModel.fetch();
+ accountView.render();
+</script>
\ No newline at end of file
diff --git a/project/accounts/templates/accounts/account.html b/project/accounts/templates/accounts/account.html
index ad985c95d..a76cced79 100644
--- a/project/accounts/templates/accounts/account.html
+++ b/project/accounts/templates/accounts/account.html
@@ -3,25 +3,25 @@
<head>
{% load static %}
{% include "base/links.html" %}
- {% include "base/less_headers/account_less.html" %}
+ {% include "accounts/base/less_headers/account_less.html" %}
</head>
<!-- Util Partials -->
<script id="civi-template" type="text/template">
- {% include "partials/account/utils/account_civi.html" %}
+ {% include "accounts/partials/account/utils/account_civi.html" %}
</script>
<script id="user-chip-template" type="text/template">
- {% include "partials/account/utils/user_chip.html" %}
+ {% include "accounts/partials/account/utils/user_chip.html" %}
</script>
<script id="user-card-template" type="text/template">
- {% include "partials/account/utils/user_card.html" %}
+ {% include "accounts/partials/account/utils/user_card.html" %}
</script>
<script id="issue-template" type="text/template">
- {% include "partials/account/utils/account_issue.html" %}
+ {% include "accounts/partials/account/utils/account_issue.html" %}
</script>
<script id="rep-template" type="text/template">
- {% include "partials/account/utils/representative_card.html" %}
+ {% include "accounts/partials/account/utils/representative_card.html" %}
</script>
<script id="rep-chip-template" type="text/template">
{% verbatim %}
@@ -34,28 +34,28 @@
<!-- account tab templates -->
<script id="my-civis-template" type="text/template">
- {% include "partials/account/tabs/my_civis.html" %}
+ {% include "accounts/partials/account/tabs/my_civis.html" %}
</script>
<script id="followers-template" type="text/template">
- {% include "partials/account/tabs/followers.html" %}
+ {% include "accounts/partials/account/tabs/followers.html" %}
</script>
<script id="following-template" type="text/template">
- {% include "partials/account/tabs/following.html" %}
+ {% include "accounts/partials/account/tabs/following.html" %}
</script>
<script id="my-issues-template" type="text/template">
- {% include "partials/account/tabs/my_issues.html" %}
+ {% include "accounts/partials/account/tabs/my_issues.html" %}
</script>
<!-- account base templates -->
<script id="account-template" type="text/template">
- {% include "partials/account/account_base.html" %}
+ {% include "accounts/partials/account/account_base.html" %}
</script>
<script id="sidebar-template" type="text/template">
- {% include "partials/account/account_sidebar.html" %}
+ {% include "accounts/partials/account/account_sidebar.html" %}
</script>
<script id="settings-template" type="text/template">
- {% include "partials/account/account_settings.html" %}
+ {% include "accounts/partials/account/account_settings.html" %}
</script>
diff --git a/project/accounts/templates/accounts/base/less_headers/feed_less.html b/project/accounts/templates/accounts/base/less_headers/feed_less.html
new file mode 100644
index 000000000..e935e72f8
--- /dev/null
+++ b/project/accounts/templates/accounts/base/less_headers/feed_less.html
@@ -0,0 +1,5 @@
+{% load static %}
+<link type="text/css" rel="stylesheet/less" href="{% static "less/base.less" %}"/>
+<link type="text/css" rel="stylesheet/less" href="{% static "less/utils.less" %}"/>
+<link type="text/css" rel="stylesheet/less" href="{% static "less/feed.less" %}"/>
+<script src="{% static "dependencies/less.min.js" type="text/javascript" %}"></script>
diff --git a/project/accounts/templates/accounts/base/less_headers/setup_less.html b/project/accounts/templates/accounts/base/less_headers/setup_less.html
new file mode 100644
index 000000000..694f9d241
--- /dev/null
+++ b/project/accounts/templates/accounts/base/less_headers/setup_less.html
@@ -0,0 +1,5 @@
+{% load static %}
+<link type="text/css" rel="stylesheet/less" href="{% static "less/base.less" %}"/>
+<link type="text/css" rel="stylesheet/less" href="{% static "less/utils.less" %}"/>
+<link type="text/css" rel="stylesheet/less" href="{% static "less/setup.less" %}"/>
+<script src="{% static "dependencies/less.min.js" type="text/javascript" %}"></script>
diff --git a/project/accounts/templates/accounts/base/less_headers/thread_less.html b/project/accounts/templates/accounts/base/less_headers/thread_less.html
new file mode 100644
index 000000000..003fb8c5b
--- /dev/null
+++ b/project/accounts/templates/accounts/base/less_headers/thread_less.html
@@ -0,0 +1,5 @@
+{% load static %}
+<link type="text/css" rel="stylesheet/less" href="{% static "less/base.less" %}"/>
+<link type="text/css" rel="stylesheet/less" href="{% static "less/utils.less" %}"/>
+<link type="text/css" rel="stylesheet/less" href="{% static "less/thread.less" %}"/>
+<script src="{% static "dependencies/less.min.js" type="text/javascript" %}"></script>
diff --git a/project/accounts/templates/accounts/base/links.html b/project/accounts/templates/accounts/base/links.html
new file mode 100644
index 000000000..5cb85d105
--- /dev/null
+++ b/project/accounts/templates/accounts/base/links.html
@@ -0,0 +1,23 @@
+{% load static %}
+{% csrf_token %}
+<meta name="google-site-verification" content="QekFwH1-88aQFx0rSPRCUTnJBrvpn0yHw4CR0okzHek" />
+<meta name="viewport" content="width=device-width, initial-scale=1">
+<meta charset="utf-8"/>
+<link href="https://fonts.googleapis.com/css?family=Lato:300,400,600,700" rel="stylesheet">
+
+<link rel="apple-touch-icon" sizes="180x180" href="/apple-touch-icon.png">
+<link rel="icon" type="image/png" href="/favicon-32x32.png" sizes="32x32">
+<link rel="shortcut icon" href="/favicon.ico">
+<meta name="msapplication-TileImage" content="/mstile-144x144.png">
+<meta name="msapplication-TileColor" content="#fcffff">
+
+<link type="text/css" rel="stylesheet" href="{% static "dependencies/materialize.min.css" %}">
+<link type="text/css" rel="stylesheet" href="{% static "dependencies/magicsuggest-min.css" %}">
+<script type="text/javascript" src="{% static "dependencies/jquery.min.js" %}"></script>
+<script type="text/javascript" src="{% static "dependencies/underscore.js" %}"></script>
+<script type="text/javascript" src="{% static "dependencies/backbone-min.js" %}"></script>
+<script type="text/javascript" src="{% static "dependencies/materialize.min.js" %}"></script>
+<script type="text/javascript" src="{% static "dependencies/magicsuggest-min.js" %}"></script>
+<script type="text/javascript" src="{% static "js/base.js" %}"></script>
+<link href="https://fonts.googleapis.com/icon?family=Material+Icons" rel="stylesheet">
+<title>CiviWiki</title>
diff --git a/project/accounts/templates/accounts/feed.html b/project/accounts/templates/accounts/feed.html
index 3e6ecfef7..0eb950075 100644
--- a/project/accounts/templates/accounts/feed.html
+++ b/project/accounts/templates/accounts/feed.html
@@ -8,20 +8,20 @@
</head>
<script id="feed-template" type="text/template">
- {% include "partials/feed/feed_base.html" %}
+ {% include "accounts/partials/feed/feed_base.html" %}
</script>
<script id="new-thread-template" type="text/template">
- {% include "partials/feed/new_thread.html" %}
+ {% include "accounts/partials/feed/new_thread.html" %}
</script>
<script id="thread-card-template" type="text/template">
- {% include "partials/feed/thread_card.html" %}
+ {% include "accounts/partials/feed/thread_card.html" %}
</script>
<script id="feed-list-template" type="text/template">
- {% include "partials/feed/feed_list.html" %}
+ {% include "accounts/partials/feed/feed_list.html" %}
</script>
<script id="categories-template" type="text/template">
- {% include "partials/feed/categories.html" %}
+ {% include "accounts/partials/feed/categories.html" %}
</script>
<body>
diff --git a/project/accounts/templates/accounts/partials/account/tabs/followers.html b/project/accounts/templates/accounts/partials/account/tabs/followers.html
index d0a831e02..1d9cfbd79 100644
--- a/project/accounts/templates/accounts/partials/account/tabs/followers.html
+++ b/project/accounts/templates/accounts/partials/account/tabs/followers.html
@@ -1,7 +1,9 @@
-{% verbatim %}
+{% load i18n %}
+
+
<div class="people scroll">
<div class="row tab-title">
- <div class="col s6 title-lato">{{this.model.get('followers').length}} Followers</div>
+ <div class="col s6 title-lato">{% translate "Followers" %}</div>
<div class="input-field col s4 push-s2">
<input id="search" type="text" class="validate">
<label for="search">Search</label>
@@ -9,7 +11,9 @@
</div>
<!-- Populate Civi list -->
<div class="row">
+ {% verbatim %}
{{# if (this.model.get('followers').length > 0) { }}
+ {{this.model.get('followers').length}} Followers<p></p>
{{# _.each(this.model.get('followers'), function (u) {}}
<div class="col s12 m6">
{{= cw.underscorePartial('user-chip-template', u)}}
@@ -19,11 +23,12 @@
<div class="section no-state">
<div class="container">
<div class="section">
- <div class="center title-lato grey-text">NO FOLLOWERS</div>
+ <div class="center title-lato grey-text">No followers</div>
</div>
</div>
</div>
{{# } }}
+ {% endverbatim %}
</div>
</div>
-{% endverbatim %}
+
diff --git a/project/accounts/templates/accounts/partials/account/tabs/following.html b/project/accounts/templates/accounts/partials/account/tabs/following.html
index b7c2ed41b..50f406746 100644
--- a/project/accounts/templates/accounts/partials/account/tabs/following.html
+++ b/project/accounts/templates/accounts/partials/account/tabs/following.html
@@ -19,7 +19,7 @@
<div class="section no-state">
<div class="container">
<div class="section">
- <div class="center title-lato grey-text">NO FOLLOWINGS</div>
+ <div class="center title-lato grey-text">Not following any users</div>
</div>
</div>
</div>
diff --git a/project/accounts/templates/accounts/partials/account/tabs/my_civis.html b/project/accounts/templates/accounts/partials/account/tabs/my_civis.html
index 47271a5da..3b3eb5ba7 100644
--- a/project/accounts/templates/accounts/partials/account/tabs/my_civis.html
+++ b/project/accounts/templates/accounts/partials/account/tabs/my_civis.html
@@ -1,20 +1,20 @@
{% load i18n %}
-{% verbatim %}
<div class="profile-civi-list col s12">
<div class="row tab-title">
- <div class="col s6 title-lato">{% trans "My Civi Activity" %} </div>
+ <div class="col s6 title-lato">{% translate "My Civi Activity" %} </div>
<div class="sort-options col s2 push-s4 section">
- <label>{% trans "View By:" %} </label>
+ <label>{% translate "View By:" %} </label>
<select class="browser-default">
- <option value="1" selected>{% trans "Recent" %} </option>
- <option value="2">{% trans "Popular" %} </option>
- <option value="3">{% trans "Top" %} </option>
+ <option value="1" selected>{% translate "Recent" %} </option>
+ <option value="2">{% translate "Popular" %} </option>
+ <option value="3">{% translate "Top" %} </option>
</select>
</div>
</div>
<!-- Populate Civi list -->
<div class="row">
+ {% verbatim %}
{{# if (this.model.get('history').length > 0) { }}
{{# _.each(this.model.get('history'), function (c) { }}
{{= cw.underscorePartial('civi-template', JSON.parse(c))}}
@@ -23,11 +23,12 @@
<div class="section no-state">
<div class="container">
<div class="section">
- <div class="center title-lato text">{% trans "No Civis" %} </div>
+ <div class="center title-lato text">No activity</div>
</div>
</div>
</div>
{{# } }}
+ {% endverbatim %}
</div>
</div>
-{% endverbatim %}
+
diff --git a/project/accounts/templates/accounts/partials/account/tabs/my_issues.html b/project/accounts/templates/accounts/partials/account/tabs/my_issues.html
index 42fbbdc18..b81dce858 100644
--- a/project/accounts/templates/accounts/partials/account/tabs/my_issues.html
+++ b/project/accounts/templates/accounts/partials/account/tabs/my_issues.html
@@ -1,9 +1,10 @@
-{% verbatim %}
+{% load i18n %}
+
<div class="row tab-title">
- <div class="col s12 title-lato">Issues I care about</div>
+ <div class="col s12 title-lato">{% translate "Issues I care about" %}</div>
</div>
<div class="row">
-
+ {% verbatim %}
{{# if (this.model.get('issues').length > 0) { }}
{{# _.each(this.model.get('issues'), function (i) { }}
{{= cw.underscorePartial('issue-template', i )}}
@@ -12,10 +13,11 @@
<div class="section no-state">
<div class="container">
<div class="section">
- <div class="center title-lato grey-text">NO ISSUES</div>
+ <div class="center title-lato grey-text">No issues found</div>
</div>
</div>
</div>
{{# } }}
+ {% endverbatim %}
</div>
-{% endverbatim %}
+
diff --git a/project/accounts/templates/accounts/register/entry_base.html b/project/accounts/templates/accounts/register/entry_base.html
index 3df235bfa..01c2a9d66 100644
--- a/project/accounts/templates/accounts/register/entry_base.html
+++ b/project/accounts/templates/accounts/register/entry_base.html
@@ -2,8 +2,8 @@
<html>
<head>
{% load static %}
- {% include "base/links.html" %}
- {% include "base/less_headers/login_less.html" %}
+ {% include "accounts/base/links.html" %}
+ {% include "accounts/base/less_headers/login_less.html" %}
<title>{% block title %}CiviWiki{% endblock %}</title>
</head>
<body>
diff --git a/project/accounts/templates/accounts/reset_by_email.html b/project/accounts/templates/accounts/reset_by_email.html
index cdf3d5b7a..e3cd0d4bc 100644
--- a/project/accounts/templates/accounts/reset_by_email.html
+++ b/project/accounts/templates/accounts/reset_by_email.html
@@ -3,7 +3,7 @@
<head>
{% load static %}
{% include "base/links.html" %}
- {% include "base/less_headers/login_less.html" %}
+ {% include "accounts/base/less_headers/login_less.html" %}
<title>Account Recovery</title>
</head>
<body>
diff --git a/project/accounts/templates/accounts/tabs/followers.html b/project/accounts/templates/accounts/tabs/followers.html
deleted file mode 100644
index d0a831e02..000000000
--- a/project/accounts/templates/accounts/tabs/followers.html
+++ /dev/null
@@ -1,29 +0,0 @@
-{% verbatim %}
-<div class="people scroll">
- <div class="row tab-title">
- <div class="col s6 title-lato">{{this.model.get('followers').length}} Followers</div>
- <div class="input-field col s4 push-s2">
- <input id="search" type="text" class="validate">
- <label for="search">Search</label>
- </div>
- </div>
- <!-- Populate Civi list -->
- <div class="row">
- {{# if (this.model.get('followers').length > 0) { }}
- {{# _.each(this.model.get('followers'), function (u) {}}
- <div class="col s12 m6">
- {{= cw.underscorePartial('user-chip-template', u)}}
- </div>
- {{# }, this); }}
- {{# } else { }}
- <div class="section no-state">
- <div class="container">
- <div class="section">
- <div class="center title-lato grey-text">NO FOLLOWERS</div>
- </div>
- </div>
- </div>
- {{# } }}
- </div>
-</div>
-{% endverbatim %}
diff --git a/project/accounts/templates/accounts/tabs/following.html b/project/accounts/templates/accounts/tabs/following.html
deleted file mode 100644
index b7c2ed41b..000000000
--- a/project/accounts/templates/accounts/tabs/following.html
+++ /dev/null
@@ -1,29 +0,0 @@
-{% verbatim %}
-<div class="people scroll">
- <div class="row tab-title">
- <div class="col s6 title-lato">{{this.model.get('following').length}} Following</div>
- <div class="input-field col s4 push-s2">
- <input id="search" type="text" class="validate">
- <label for="search">Search</label>
- </div>
- </div>
- <!-- Populate Civi list -->
- <div class="row">
- {{# if (this.model.get('following').length > 0) { }}
- {{# _.each(this.model.get('following'), function (u) { }}
- <div class="col s12 m6">
- {{= cw.underscorePartial('user-chip-template', u)}}
- </div>
- {{# }, this); }}
- {{# } else { }}
- <div class="section no-state">
- <div class="container">
- <div class="section">
- <div class="center title-lato grey-text">NO FOLLOWINGS</div>
- </div>
- </div>
- </div>
- {{# } }}
- </div>
-</div>
-{% endverbatim %}
diff --git a/project/accounts/templates/accounts/tabs/my_civis.html b/project/accounts/templates/accounts/tabs/my_civis.html
deleted file mode 100644
index 190afdf84..000000000
--- a/project/accounts/templates/accounts/tabs/my_civis.html
+++ /dev/null
@@ -1,31 +0,0 @@
-{% verbatim %}
-<div class="profile-civi-list col s12">
- <div class="row tab-title">
- <div class="col s6 title-lato">My Civi Activity</div>
- <!-- <div class="sort-options col s2 push-s4 section">
- <label>View By:</label>
- <select class="browser-default">
- <option value="1" selected>Recent</option>
- <option value="2">Popular</option>
- <option value="3">Top</option>
- </select>
- </div> -->
- </div>
- <!-- Populate Civi list -->
- <div class="row">
- {{# if (this.model.get('history').length > 0) { }}
- {{# _.each(this.model.get('history'), function (c) { }}
- {{= cw.underscorePartial('civi-template', JSON.parse(c))}}
- {{# }, this); }}
- {{# } else { }}
- <div class="section no-state">
- <div class="container">
- <div class="section">
- <div class="center title-lato text">NO CIVIS</div>
- </div>
- </div>
- </div>
- {{# } }}
- </div>
-</div>
-{% endverbatim %}
diff --git a/project/accounts/templates/accounts/tabs/my_issues.html b/project/accounts/templates/accounts/tabs/my_issues.html
deleted file mode 100644
index 42fbbdc18..000000000
--- a/project/accounts/templates/accounts/tabs/my_issues.html
+++ /dev/null
@@ -1,21 +0,0 @@
-{% verbatim %}
-<div class="row tab-title">
- <div class="col s12 title-lato">Issues I care about</div>
-</div>
-<div class="row">
-
- {{# if (this.model.get('issues').length > 0) { }}
- {{# _.each(this.model.get('issues'), function (i) { }}
- {{= cw.underscorePartial('issue-template', i )}}
- {{# }, this); }}
- {{# } else { }}
- <div class="section no-state">
- <div class="container">
- <div class="section">
- <div class="center title-lato grey-text">NO ISSUES</div>
- </div>
- </div>
- </div>
- {{# } }}
-</div>
-{% endverbatim %}
diff --git a/project/accounts/views.py b/project/accounts/views.py
index 90eb64c82..8821e8bd2 100644
--- a/project/accounts/views.py
+++ b/project/accounts/views.py
@@ -164,4 +164,4 @@ def get(self, request):
"username": request.user.username,
"email": request.user.email,
}
- return TemplateResponse(request, "user-setup.html", data)
+ return TemplateResponse(request, "accounts/user-setup.html", data)
|
chainer__chainer-2961 | Variable defines __hash__ but does not define __eq__
`Variable` defines `__hash__`, but its `__eq__` is not actually implemented; it only raises `NotImplementedError`:
```
class Variable(object):
    # ...
    def __eq__(self, other):
        raise NotImplementedError()
    # ...
    def __hash__(self):
        return super(Variable, self).__hash__()
```
But the Python documentation (https://docs.python.org/3.1/reference/datamodel.html#object.__hash__) says:
> If a class does not define an __eq__() method it should not define a __hash__() operation either;
Is there a reason `Variable` is designed like this?
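For context, here is a minimal sketch in plain Python (not Chainer code; the class names are illustrative) of the behavior in question. On Python 3, defining `__eq__` implicitly sets `__hash__ = None`, so a class whose `__eq__` raises but whose instances should remain hashable by identity must restore `__hash__` explicitly:
```python
# Sketch only: EqOnly and EqAndHash are illustrative, not Chainer classes.

class EqOnly(object):
    def __eq__(self, other):
        raise NotImplementedError()


class EqAndHash(object):
    def __eq__(self, other):
        raise NotImplementedError()

    def __hash__(self):
        # Restore the default identity-based hash that defining
        # __eq__ removed on Python 3.
        return super(EqAndHash, self).__hash__()


# Defining __eq__ without __hash__ makes instances unhashable on Python 3:
try:
    hash(EqOnly())
except TypeError as exc:
    print(exc)  # unhashable type: 'EqOnly'

# With __hash__ restored, instances still work as set members / dict keys.
# Membership tests on the same object use the identity fast path
# ("x is y or x == y"), so __eq__ is never called here:
v = EqAndHash()
print(v in {v})  # True

# Comparing two distinct instances still raises, by design:
#   EqAndHash() == EqAndHash()  ->  NotImplementedError
```
Read this way, the explicit `__hash__` override looks like a deliberate choice to keep identity-based hashing while forbidding element-wise comparison, though whether that was the actual intent is what this issue asks.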
| [
{
"content": "import collections\nimport copy\nimport heapq\nimport traceback\nimport warnings\nimport weakref\n\nimport numpy\n\nimport chainer\nfrom chainer import cuda\nfrom chainer import initializers\nfrom chainer.initializers import constant\nfrom chainer.utils import argument\n\n\ndef _check_grad_type(func, x, gx):\n if x.data is None or gx is None:\n # ``x.data is None`` implies that the data array is not retained\n return\n if not isinstance(gx, type(x.data)):\n msg = ('Type of data and grad mismatch\\n%s != %s' %\n (type(x.data), type(gx)))\n typ = TypeError\n elif gx.dtype != x.data.dtype:\n msg = ('Dtype of data and grad mismatch\\n%s != %s' %\n (x.data.dtype, gx.dtype))\n typ = TypeError\n elif gx.shape != x.data.shape:\n msg = ('Shape of data and grad mismatch\\n%s != %s' %\n (x.data.shape, gx.shape))\n typ = ValueError\n else:\n return\n\n detail = ''\n if func:\n detail = 'Function `{0}` ({1}) has a bug.\\n'.format(\n type(func)._impl_name, func.label)\n stack = func.stack\n if stack:\n detail += 'Stacktrace of the function is below:\\n'\n for line in traceback.format_list(func.stack):\n detail += line\n detail += '''\nPlease report this error to the issue tracker with the stack trace,\nthe information of your environment, and your script:\nhttps://github.com/chainer/chainer/issues/new.\n'''.format(type(func).__name__, func.label)\n\n raise typ(detail + msg)\n\n\ndef variable_repr(var):\n \"\"\"Return the string representation of a variable.\n\n Args:\n var (~chainer.Variable): Input Variable.\n .. seealso:: numpy.array_repr\n \"\"\"\n xp = cuda.get_array_module(var)\n if xp is numpy:\n arr = var.data\n else:\n arr = var.data.get()\n\n if var.name:\n prefix = 'variable ' + var.name\n else:\n prefix = 'variable'\n\n if arr is None:\n lst = 'None'\n elif arr.size > 0 or arr.shape == (0,):\n lst = numpy.array2string(arr, None, None, None, ', ', prefix + '(')\n else: # show zero-length shape unless it is (0,)\n lst = '[], shape=%s' % (repr(arr.shape),)\n\n return '%s(%s)' % (prefix, lst)\n\n\ndef variable_str(var):\n \"\"\"Return the string representation of a variable.\n\n Args:\n var (~chainer.Variable): Input Variable.\n .. seealso:: numpy.array_str\n \"\"\"\n xp = cuda.get_array_module(var)\n if xp is numpy:\n arr = var.data\n else:\n arr = var.data.get()\n\n if var.name:\n prefix = 'variable ' + var.name\n else:\n prefix = 'variable'\n\n if arr is None:\n lst = 'None'\n else:\n lst = numpy.array2string(arr, None, None, None, ' ', prefix + '(')\n\n return '%s(%s)' % (prefix, lst)\n\n\nclass VariableNode(object):\n\n \"\"\"Node in the backward computational graph representing a variable.\n\n This object represents a variable node in a computational graph. The node\n is used in error backpropagation (a.k.a. backprop) to determine which\n gradient to be passed to each function.\n\n A variable node is held by the corresponding :class:`Variable` object,\n which is managed by users. :class:`Function` objects that take the variable\n as an input also hold references to the variable node.\n\n Note that the node does not hold a reference to the corresponding data\n array in general. The data array is actually accessible by the node in the\n following cases.\n\n 1. If there exists a :class:`Variable` object that holds a reference to the\n variable node, the variable node holds a weak reference to the variable\n object, and thus the data array is accessible via the weak reference.\n 2. If :meth:`retain_data` is called, the node holds a reference to the data\n array. 
It is mainly called by a function that needs the input or output\n data array in its backprop procedure. See :meth:`Function.retain_inputs`\n and :meth:`Function.retain_outputs` for more details.\n\n Users usually do not need to touch this variable node object. The\n computational graph is automatically managed by Chainer, and any interface\n that is beneficial for users is also provided by :class:`Variable`.\n\n Args:\n variable (Variable): The corresponding variable object.\n name (str): Name of the variable node.\n\n Attributes:\n dtype: Data type of the data array.\n shape: Shape of the data array.\n name (str): Name of the variable node.\n\n \"\"\"\n\n _creator_node = None\n _data = None\n _rank = 0\n # Name of the Function is assigned if this variable is a gradient generated\n # by an old-style Function\n _old_style_grad_generator = None\n\n def __init__(self, variable, name, **kwargs):\n argument.check_unexpected_kwargs(\n kwargs,\n grad='unexpected keyword argument \"grad\": '\n 'pass the gradient to Variable instead'\n )\n self._variable = weakref.ref(variable)\n self.name = name\n self._requires_grad = variable.requires_grad\n\n vdata = variable.data\n self._set_data_type(vdata)\n\n @property\n def creator(self):\n \"\"\"Function object that created this variable node.\n\n When the function is implemented with the old-style API (i.e., it uses\n :class:`Function` class), this property returns the :class:`Function`\n object. The object is extracted from the :class:`FunctionAdapter`\n object, so the returned object is not the function node, but instead\n the actual implementation of forward and backward procedures.\n\n When the function is implemented with the new-style API (i.e., it uses\n :class:`FunctionNode` class), this property returns the function node\n object. In this case, the returned object is same as\n :attr:`creator_node`.\n\n .. warning::\n\n As of v3.0.0, when the creator is an old-style function, the\n following code is invalid:\n\n .. code-block:: python\n\n creator = v.creator\n v.creator = None\n ...\n v.creator = creator\n\n The point is that :class:`FunctionNode` objects are used as nodes\n in the computational graph instead of :class:`Function`, and each\n :class:`Function` object only holds a *weak reference* to the\n corresponding :class:`FunctionNode`. Since ``creator`` returns the\n :class:`Function` object, the :class:`FunctionNode` object is not\n kept by preserving ``creator``.\n\n The above code should be fixed as follows.\n\n .. 
code-block:: python\n\n creator_node = v.creator_node\n v.creator_node = None\n ...\n v.creator_node = creator_node\n\n \"\"\"\n node = self._creator_node\n if node is None:\n return None\n\n if isinstance(node, chainer.function.FunctionAdapter):\n return node.function\n return node\n\n @creator.setter\n def creator(self, func):\n self.creator_node = func\n\n @property\n def creator_node(self):\n \"\"\"Function node that has this variable as an output.\n\n See :class:`FunctionNode` for the definition of a function node.\n\n \"\"\"\n return self._creator_node\n\n @creator_node.setter\n def creator_node(self, func):\n if isinstance(func, chainer.Function):\n func = func.node\n self._creator_node = func\n if func is not None:\n self._rank = func.rank + 1\n\n @property\n def data(self):\n \"\"\"Data array of the corresponding variable.\n\n If the data is not available, it returns ``None``.\n\n \"\"\"\n return self._data\n\n @data.setter\n def data(self, d):\n self._data = d\n self._set_data_type(d)\n\n @property\n def grad(self):\n \"\"\"Gradient array of the corresponding variable.\n\n If the variable is not available, it returns ``None``.\n\n \"\"\"\n var = self.get_variable()\n return None if var is None else var.grad\n\n @property\n def grad_var(self):\n \"\"\"Gradient variable of the corresponding variable.\n\n If the corresponding variable is not available, it return ``None``.\n\n \"\"\"\n var = self.get_variable()\n return None if var is None else var._grad_var\n\n @property\n def label(self):\n \"\"\"Short text that represents the variable node.\"\"\"\n if self.shape == ():\n return str(self.dtype)\n return '(%s), %s' % (', '.join(map(str, self.shape)),\n str(self.dtype))\n\n @property\n def rank(self):\n return self._rank\n\n @property\n def requires_grad(self):\n \"\"\"It indicates that ``grad`` will be set in backward calculation.\"\"\"\n return self._requires_grad\n\n def get_variable(self):\n \"\"\"Returns the corresponding :class:`Variable` object.\n\n VariableNode object holds a weak reference of the variable object. If\n the reference is alive, it is returned by this property. Otherwise,\n this property creates a new :class:`Variable` object from this node\n object and returns it.\n\n Returns:\n Variable: The variable object that refers this node.\n\n \"\"\"\n var = self._variable()\n if var is not None:\n return var\n\n var = Variable(self.data, name=self.name,\n requires_grad=self._requires_grad)\n var._node = self\n return var\n\n def set_creator(self, creator):\n \"\"\"Sets a :class:`Function` object that created this node.\n\n This method is equivalent to ``self.creator = creator``. A\n :class:`FunctionNode` object can also be passed.\n\n Args:\n creator (Function or FunctionNode): Function that has created this\n variable.\n\n \"\"\"\n self.creator = creator\n\n def set_creator_node(self, creator_node):\n \"\"\"Sets a :class:`FunctionNode` object that created this node.\n\n This method is equivalent to ``self.creator_node = creator_node``. 
A\n :class:`Function` object can also be passed, in which case the\n :attr:`~Function.node` object is extracted.\n\n Args:\n creator_node (FunctionNode or Function): Function node that has\n this variable as an output.\n\n \"\"\"\n self.creator_node = creator_node\n\n def unchain(self):\n \"\"\"Deletes the reference to the creator of this variable node.\n\n This method is equivalent to ``self.creator_node = None``.\n\n \"\"\"\n self.creator_node = None\n\n def retain_data(self):\n \"\"\"Lets the node hold a reference to the underlying data array.\n\n This method gets the data array of the corresponding variable and keeps\n it. If the weak reference to the corresponding variable is dead, it\n raises an error.\n\n \"\"\"\n variable = self._variable()\n if variable is not None:\n self.data = variable.data\n else:\n raise RuntimeError('cannot retain variable data: the variable has '\n 'been already released')\n\n def _set_data_type(self, d):\n if d is None:\n self.dtype = None\n self.shape = None\n else:\n self.dtype = d.dtype\n self.shape = d.shape\n\n def _check_old_style_gradient(self):\n if self._old_style_grad_generator is not None:\n raise RuntimeError(\n 'cannot twice-differentiate an old style Function \"%s\"' %\n self._old_style_grad_generator)\n\n\ndef _create_variable(data, name, grad, requires_grad):\n return Variable(\n data, name=name, grad=grad, requires_grad=requires_grad)\n\n\nclass Variable(object):\n\n \"\"\"__init__(data=None, *, name=None, grad=None, requires_grad=True)\n\n Array with a structure to keep track of computation.\n\n Every variable holds a data array of type either :class:`numpy.ndarray` or\n :class:`cupy.ndarray`.\n\n A variable object holds a data array and a :class:`VariableNode` object of\n a computational graph. If the variable is constructed by the user, the node\n is *root* and does not hold any parent. If the variable is constructed by a\n :class:`FunctionNode` object, the node holds a reference to its parent\n called :attr:`creator_node`. This reference is used in backpropagation to\n backtrack the graph.\n\n Users can disable (resp. enable) this chaining behavior by calling\n :func:`~chainer.no_backprop_mode` (resp.\n :func:`~chainer.force_backprop_mode`).\n In the former context, a variable never creates a computational graph,\n whereas in the latter context, it is forced to create.\n\n .. warning::\n\n ``volatile`` argument is not supported anymore since v2.\n Instead, use :func:`chainer.no_backprop_mode`.\n\n Args:\n data (numpy.ndarray or cupy.ndarray): Initial data array.\n name (str): Name of the variable.\n grad (numpy.ndarray or cupy.ndarray): Initial gradient array.\n requires_grad (bool): Boolean indicating whether ``grad`` will be set\n in backward calculation.\n\n Attributes:\n data: Data array of type either :class:`numpy.ndarray` or\n :class:`cupy.ndarray`. If it is None, the variable is left in an\n uninitialized state.\n grad_var (Variable): Gradient variable.\n\n \"\"\" # NOQA\n\n def __init__(self, data=None, **kwargs):\n argument.check_unexpected_kwargs(\n kwargs, volatile='volatile argument is not supported anymore. 
'\n 'Use chainer.using_config')\n name, grad, requires_grad \\\n = argument.parse_kwargs(\n kwargs, ('name', None), ('grad', None),\n ('requires_grad', True))\n\n if (data is not None and\n not isinstance(data, (numpy.ndarray, cuda.ndarray))):\n msg = '''numpy.ndarray or cuda.ndarray are expected.\nActual: {0}'''.format(type(data))\n raise TypeError(msg)\n\n # Use a list as a data structure to hold the data array indirectly to\n # abstract its initialized/uninitialized state.\n self._data = [data]\n self._requires_grad = requires_grad\n self._node = VariableNode(self, name)\n self._grad_var = None if grad is None else Variable(grad)\n\n def __copy__(self):\n return self._copy_to(Variable())\n\n def _copy_to(self, target):\n target.__dict__ = copy.copy(self.__dict__)\n target._node = VariableNode(target, self.name)\n return target\n\n def __reduce__(self):\n return _create_variable, (self.data, self.name, self.grad,\n self._requires_grad)\n\n def __repr__(self):\n return variable_repr(self)\n\n def __str__(self):\n return variable_str(self)\n\n @property\n def name(self):\n return self._node.name\n\n @name.setter\n def name(self, n):\n self._node.name = n\n\n def summary(self):\n if self.name:\n return '<variable %s>' % self.name\n else:\n return '<variable at 0x%x>' % id(self)\n\n def debug_print(self):\n \"\"\"Display a summary of the stored data and location of the Variable\"\"\"\n\n msg = \"\"\"{summary}\n- device: {device}\n- backend: {background}\n- shape: {shape}\n- dtype: {dtype}\n- statistics: {stats}\n- grad: {grad}\"\"\"\n\n stats_msg = 'mean={0:.8f}, std={1:.8f}'\n\n try:\n device = self.data.device\n except AttributeError:\n device = 'CPU'\n\n with cuda.get_device_from_array(self.data) as dev:\n xp = numpy if int(dev) == -1 else cuda.cupy\n\n if self.grad is None:\n grad = None\n elif xp.all(self.grad == 0):\n grad = 0\n else:\n grad = stats_msg.format(float(xp.mean(self.grad)),\n float(xp.std(self.grad)))\n\n stats = stats_msg.format(float(xp.mean(self.data)),\n float(xp.std(self.data)))\n\n return msg.format(summary=self.summary(),\n grad=grad, shape=self.data.shape,\n background=type(self.data),\n dtype=self.data.dtype, device=device,\n stats=stats)\n\n def __pos__(self):\n return self\n\n def __len__(self):\n \"\"\"Returns the first dimension of the data array.\n\n Returns:\n int: Number of the first dimension of the data array.\n\n \"\"\"\n return len(self.data)\n\n @property\n def label(self):\n \"\"\"Short text that represents the variable.\"\"\"\n return self._node.label\n\n @property\n def creator(self):\n \"\"\"Function implementation that created this variable.\n\n When this variable has been created by an old-style function (i.e., it\n is implemented as a subclass of :class:`Function`), this property\n returns that :class:`Function` object.\n\n When this variable has been created by a new-style function (i.e., it\n is implemented as a subclass of :class:`FunctionNode` class), this\n property returns that node object.\n\n \"\"\"\n return self._node.creator\n\n @creator.setter\n def creator(self, func):\n self._node.creator = func\n\n @property\n def creator_node(self):\n \"\"\":class:`FunctionNode` object that created this variable.\n\n This property has a setter to which ``None`` can be set. Setting\n ``None`` to this property is equivalent to call :meth:`unchain`;\n it purges the variable from the function that created this variable.\n\n The setter also accepts the original :class:`FunctionNode` object that\n created this variable. 
For example, you can once set ``None`` to this\n property and then set the original value again.\n\n .. note::\n Setting an irrelevant :meth:`FunctionNode` object does not emit any\n error immediately, whereas the behavior is undefined. Do not set\n a :meth:`FunctionNode` object that did not create this variable\n object.\n\n \"\"\"\n return self._node._creator_node\n\n @creator_node.setter\n def creator_node(self, func):\n self._node.creator_node = func\n\n @property\n def data(self):\n return self._data[0]\n\n @data.setter\n def data(self, d):\n self._data[0] = d\n self._node._set_data_type(d)\n\n @property\n def grad(self):\n \"\"\"Gradient array of this variable.\n\n Not that this property returns the underlying array of the gradient\n variable instead of the gradient variable itself; to get/set\n gradient variable, use :attr:`grad_var` instead.\n\n \"\"\"\n gv = self._grad_var\n return None if gv is None else gv.data\n\n @grad.setter\n def grad(self, g):\n self.grad_var = None if g is None else Variable(g)\n\n @property\n def grad_var(self):\n return self._grad_var\n\n @grad_var.setter\n def grad_var(self, g):\n if g is not None:\n _check_grad_type(None, self, g.data)\n self._grad_var = g\n\n @property\n def shape(self):\n return self.data.shape\n\n @property\n def ndim(self):\n return self.data.ndim\n\n @property\n def size(self):\n return self.data.size\n\n @property\n def dtype(self):\n return self.data.dtype\n\n @property\n def rank(self):\n return self._node.rank\n\n @property\n def node(self):\n return self._node\n\n @property\n def requires_grad(self):\n \"\"\"It indicates that ``grad`` will be set in backward calculation.\"\"\"\n return self._requires_grad\n\n def to_cpu(self):\n \"\"\"Copies the data and gradient arrays to CPU.\"\"\"\n if self.data is None:\n return\n\n self._data = [cuda.to_cpu(self.data)]\n if self._grad_var is not None:\n self._grad_var.to_cpu()\n # ensure that the node tracks the device migration\n node = self._node\n if node._data is not None:\n node.retain_data()\n\n def to_gpu(self, device=None):\n \"\"\"Copies the data and gradient arrays to specified GPU.\n\n Args:\n device: Target device specifier. If omitted, the current device is\n used.\n\n \"\"\"\n if self.data is None:\n self._initial_device = (cuda.Device().id\n if device is None else device)\n else:\n self._data = [cuda.to_gpu(self.data, device)]\n if self._grad_var is not None:\n self._grad_var.to_gpu(device)\n # ensure that the node tracks the device migration\n node = self._node\n if node._data is not None:\n node.retain_data()\n\n def cleargrad(self):\n \"\"\"Clears the gradient array.\"\"\"\n self._grad_var = None\n\n def zerograd(self):\n \"\"\"Initializes the gradient array by zeros.\n\n Note that the gradient variable is unchained from the computational\n graph by this method because this operation breaks the backprop\n validity.\n\n .. deprecated:: v1.15\n Use :meth:`cleargrad` instead.\n\n \"\"\"\n warnings.warn(\n 'Variable.zerograd is deprecated. 
Use Variable.cleargrad instead.',\n DeprecationWarning)\n\n if self.data is None:\n return\n\n with cuda.get_device_from_array(self.data) as dev:\n gv = self._grad_var\n if gv is None:\n xp = numpy if dev.id == -1 else cuda.cupy\n self.grad = xp.zeros_like(self.data)\n else:\n gv.unchain()\n gv.data.fill(0)\n\n def copydata(self, var):\n \"\"\"Copies the data array from given source variable.\n\n This method copies the data array from given variable to this variable.\n The copy is done even if the arrays reside on different devices,\n including across the host and a GPU device. If this variable has an\n uninitialized data array, this method initializes it by the data array\n of the given variable. Similarly, if the given variable has an\n uninitialized data array, this method initializes it by the data array\n of this variable (``self``). If both are uninitialized, this method\n does nothing.\n\n Args:\n var (Variable): Source variable.\n\n \"\"\"\n src = var.data\n dst = self.data\n if src is None:\n if dst is None:\n return\n var.initialize(self.shape)\n src = var.data\n elif dst is None:\n self.initialize(src.shape)\n dst = self.data\n src_xp = cuda.get_array_module(src)\n dst_xp = cuda.get_array_module(dst)\n if dst_xp is src_xp:\n dst_xp.copyto(dst, src)\n elif dst_xp is numpy:\n dst_xp.copyto(dst, src.get())\n else:\n dst.set(src)\n\n def addgrad(self, var):\n \"\"\"Accumulates the gradient array from given source variable.\n\n This method adds the gradient of a given variable to the gradient of\n this variable. The accumulation is even done across the host and\n different devices. If this variable has uninitialized data/grad arrays,\n this method initializes it with the shape of the given variable and\n then accumulates the gradient.\n\n Args:\n var (Variable): Source variable.\n\n \"\"\"\n src = var._grad_var\n if src is None:\n return\n\n if self.data is None:\n self.initialize(var.shape)\n dst = self._grad_var\n\n src_dev = cuda.get_device_from_array(src.data)\n dst_dev = cuda.get_device_from_array(self.data)\n\n if src_dev.id != dst_dev.id:\n src = chainer.functions.copy(src, dst_dev.id)\n self._grad_var = src if dst is None else src + dst\n\n def set_creator(self, gen_func):\n \"\"\"Notifies the variable that the given function is its creator.\n\n Args:\n gen_func (Function): Function object that creates this variable as\n one of its outputs.\n\n \"\"\"\n self._node.set_creator(gen_func)\n\n def set_creator_node(self, fnode):\n \"\"\"Notifies the variable that the given node is its creator.\n\n Args:\n fnode (FunctionNode): Function node that has this variable as an\n output.\n\n \"\"\"\n self._node.set_creator_node(fnode)\n\n def backward(self, retain_grad=False):\n \"\"\"Runs error backpropagation (a.k.a. backprop) from this variable.\n\n On backprop, :meth:`FunctionNode.backward` is called on each\n :class:`FunctionNode` object appearing in the backward graph starting\n from this variable. The backward graph is represented by backward\n references from variable nodes to their creators, and from function\n nodes to their input variable nodes. The backprop stops at all root\n nodes. Some function nodes set ``None`` as gradients of some inputs,\n where further backprop does not take place at such inputs.\n\n This method uses :data:`grad` as the initial error array. User can\n manually set a gradient array before calling this method. 
If\n :data:`data` contains only one element (i.e., it is scalar) and\n :data:`grad` is ``None``, then this method automatically complements\n 1.0 as the initial error. This is useful on starting backprop from\n some scalar loss value.\n\n Note that this method does not support *differentiable backprop*. Use\n :func:`grad` to compute the gradient of gradients.\n\n Args:\n retain_grad (bool): If ``True``, the gradient arrays of all\n intermediate variables are kept. Otherwise, :data:`grad` of the\n intermediate variables are set to ``None`` on appropriate\n timing, which may reduce the maximum memory consumption.\n\n In most cases of training some models, the purpose of backprop\n is to compute gradients of parameters, not of all variables,\n and therefore it is recommended to set this flag ``False``.\n\n \"\"\"\n self._node._check_old_style_gradient()\n if self.creator_node is None:\n return\n initial_device = None\n if cuda.available and isinstance(self.data, cuda.cupy.ndarray):\n try:\n initial_device = cuda.Device()\n except cuda.cupy.cuda.runtime.CUDARuntimeError as e:\n if e.status != 38: # cudaErrorNoDevice\n raise\n\n is_debug = chainer.is_debug()\n\n cand_funcs = []\n seen_set = set()\n grads = {}\n\n # Initialize error by 1, if this is a loss variable\n if self.data.size == 1 and self._grad_var is None:\n with cuda.get_device_from_array(self.data) as device:\n if device is cuda.DummyDevice:\n self.grad = numpy.ones_like(self.data)\n else:\n self.grad = cuda.cupy.ones_like(self.data)\n grads[self._node] = self._grad_var\n\n def add_cand(cand):\n if cand not in seen_set:\n # Negate since heapq is min-heap\n heapq.heappush(cand_funcs, (-cand.rank, len(seen_set), cand))\n seen_set.add(cand)\n\n add_cand(self.creator_node)\n\n def get_grad(node):\n if node is None:\n return None\n if node in grads:\n return grads[node]\n return node.grad_var\n\n while cand_funcs:\n _, _, func = heapq.heappop(cand_funcs)\n inputs = func.inputs\n outputs = [y() for y in func.outputs] # access via weak ref\n\n in_data = tuple([x.data for x in inputs])\n out_grad = tuple([get_grad(y) for y in outputs])\n out_grad_data = tuple(\n [None if g is None else g.data for g in out_grad])\n hooks = chainer.get_function_hooks()\n if func._n_local_function_hooks != 0:\n hooks = collections.OrderedDict(hooks)\n hooks.update(func.local_function_hooks)\n hooks = hooks.values() # avoid six for performance\n\n cuda.get_device_from_array(*in_data).use()\n for hook in hooks:\n hook.backward_preprocess(func, in_data, out_grad_data)\n\n # Collect the current input gradients.\n #\n # Note (Tokui): When the same variable is passed to multiple input\n # slots (e.g. an expression like ``f(x, x)``), it makes the\n # gradient accumulation complicated since the back-propagated\n # gradients w.r.t. the first and second argument should be\n # accumulated to the current gradient w.r.t. 
the same variable.\n # In this case, the current implementation passes the current\n # gradient only to the first occurrence of the variable in the\n # input tuple and passes ``None`` to the rest of the occurrences.\n # For example, when the input variables are ``(x, x)``, the\n # input gradient passed to the ``backward_accumulate`` method is\n # ``(gx, None)`` where ``gx`` is the current gradient of ``x``.\n # See also the docstring of ``FunctionNode.backward_accumulate``.\n target_input_indexes = [\n i for i, x in enumerate(inputs) if x.requires_grad\n ]\n target_inputs = [inputs[i] for i in target_input_indexes]\n in_grad = []\n for i, index_i in enumerate(target_input_indexes):\n x = inputs[index_i]\n if x in target_inputs[:i]:\n # Pass ``None`` for duplicated input variables except for\n # the first occurrence (see the comment above).\n gx = None\n elif x in grads:\n gx = grads[x]\n elif x.creator_node is None:\n x._check_old_style_gradient()\n # accumulate the gradient only if the node is a leaf\n gx = x.grad_var\n else:\n gx = None\n in_grad.append(gx)\n\n gxs = func.backward_accumulate(\n target_input_indexes, out_grad, in_grad)\n\n assert len(gxs) == len(in_grad)\n for hook in hooks:\n hook.backward_postprocess(func, in_data, out_grad_data)\n\n if is_debug:\n for gx in gxs:\n if gx is None:\n continue\n gx_data = gx.data\n cuda.get_device_from_array(gx_data).use()\n if cuda.get_array_module(gx_data).isnan(gx_data).any():\n msg = 'NaN is detected on backward computation'\n raise RuntimeError(msg)\n\n if not retain_grad:\n for y in outputs:\n if y is not None and y is not self.node:\n grads[y] = None\n y_var = y.get_variable()\n if y_var is not None:\n y_var._grad_var = None\n\n for i, gx in enumerate(gxs):\n if gx is None:\n continue\n\n x = target_inputs[i]\n if not x.requires_grad:\n continue\n\n _check_grad_type(func, x, gx.data)\n\n if x in target_inputs[:i]:\n # Accumulate the duplicated gradients here. See the comment\n # above the code that builds ``in_grad``.\n cur_gx = grads[x]\n grads[x] = gx if cur_gx is None else gx + cur_gx\n else:\n grads[x] = gx\n\n x_var = x.get_variable()\n if x_var is not None:\n x_var._grad_var = grads[x]\n\n if x.creator_node is not None:\n add_cand(x.creator_node)\n\n del gxs # to reduce memory usage\n if initial_device is not None:\n initial_device.use()\n\n def reshape(self, *shape):\n \"\"\"Returns a variable of a different shape and the same content.\n\n .. seealso::\n :func:`chainer.functions.reshape` for full documentation,\n\n \"\"\"\n if len(shape) == 1 and isinstance(shape[0], (tuple, list)):\n shape = shape[0]\n return chainer.functions.reshape(self, shape)\n\n def transpose(self, *axes):\n \"\"\"Permute the dimensions of an input variable without copy.\n\n .. seealso::\n :func:`chainer.functions.transpose` for full documentation.\n\n \"\"\"\n if len(axes) == 0:\n axes = None\n elif len(axes) == 1 and (isinstance(axes[0], (tuple, list)) or\n axes[0] is None):\n axes = axes[0]\n return chainer.functions.transpose(self, axes)\n\n def unchain(self):\n \"\"\"Deletes the reference to the creator of this variable.\n\n This method deletes the reference to the creator from the corresponding\n variable node. 
Unlike :meth:`unchain_backward`, it does not backtrack\n the graph.\n\n This method is equivalent to ``self.creator_node = None``.\n\n \"\"\"\n self.creator_node = None\n\n def unchain_backward(self):\n \"\"\"Deletes references between variable nodes and functions backward.\n\n After this method completes, intermediate variable nodes and functions\n that are not referenced from anywhere are deallocated by reference\n count GC. Also this variable itself deletes the reference to its\n creator function from the node, i.e. the node becomes root in the\n computation graph. It indicates that backprop after unchaining stops at\n this variable. This behavior is useful to implement truncated BPTT.\n\n \"\"\"\n cand_funcs = []\n seen_set = set()\n\n def add_cand(cand):\n if cand is not None and cand not in seen_set:\n cand_funcs.append(cand)\n seen_set.add(cand)\n\n add_cand(self.creator_node)\n\n while cand_funcs:\n func = cand_funcs.pop()\n for var in func.inputs:\n add_cand(var.creator_node)\n func.unchain()\n\n def retain_data(self):\n \"\"\"Lets the corresponding variable node keep the underlying array.\"\"\"\n self._node.data = self._data[0]\n\n def __lt__(self, other):\n raise NotImplementedError()\n\n def __le__(self, other):\n raise NotImplementedError()\n\n def __eq__(self, other):\n raise NotImplementedError()\n\n def __ne__(self, other):\n raise NotImplementedError()\n\n def __gt__(self, other):\n raise NotImplementedError()\n\n def __ge__(self, other):\n raise NotImplementedError()\n\n def __nonzero__(self):\n raise NotImplementedError()\n\n def __bool__(self):\n raise NotImplementedError()\n\n def __hash__(self):\n return super(Variable, self).__hash__()\n\n __array_priority__ = 200\n\n\nclass Parameter(Variable):\n\n \"\"\"Parameter variable that can be registered to a link.\n\n Parameter is a subclass of :class:`Variable`. It almost behaves as same\n as a usual variable except that a parameter can be registered to a\n :class:`~chainer.Link` object just by assigning it to an attribute of\n the link within an :meth:`~chainer.Link.init_scope` context.\n\n Parameter also supports an initialization by an initializer. It can have\n two initializers: one for the data array, and the other for the gradient\n array. The initializer only specifies the way of filling the elements of\n these arrays, and the shape information is specified at the initialization\n point.\n\n When a link that the parameter has been registered to is passed to an\n :class:`~chainer.GradientMethod`, an update rule is set to the parameter.\n This update rule specifies how to update the data array of the parameter\n using its gradient array.\n\n Args:\n initializer (~chainer.Initializer or numpy.ndarray or cupy.ndarray):\n Initializer of the data array. If ``shape`` is given, this\n initializer is immediately used to initialize the data array.\n Otherwise, if it is an array, it is immediately used as the data\n array, and otherwise the data array is left uninitialized and will\n be initialized by this initializer in :meth:`initialize`. It can\n also be a scalar, in which case the data array will be filled by\n this scalar. Note that float32 is used in this case.\n shape (int or tuple of int or None): Shape of the parameter. If it is\n ``None``, the initialization is deferred to the call of\n :meth:`initialize`.\n name (str): Name of the parameter.\n\n Attributes:\n initializer: Initializer of the data array. 
It is used for\n initializing the data array of an uninitialized variable.\n update_rule: :class:`~chainer.optimizer.UpdateRule` instance that\n updates this variable as a parameter. This argument is set to\n :attr:`update_rule`.\n\n \"\"\"\n\n initializer = None\n _grad_initializer = None\n _initial_device = None\n\n def __init__(self, initializer=None, shape=None, name=None):\n if initializer is None:\n initializer = constant.NaN()\n elif numpy.isscalar(initializer):\n initializer = constant.Constant(initializer)\n if shape is None:\n if isinstance(initializer, (numpy.ndarray, cuda.ndarray)):\n # parameter initialized by the initial array\n super(Parameter, self).__init__(initializer, name=name)\n else:\n # uninitialized parameter\n super(Parameter, self).__init__(name=name)\n self.initializer = initializer\n dtype = getattr(initializer, 'dtype', numpy.float32)\n self._grad_initializer = constant.NaN(dtype)\n else:\n # parameter initialized with a given shape\n if isinstance(initializer, (numpy.ndarray, cuda.ndarray)):\n xp = cuda.get_array_module(initializer)\n initializer = constant.Constant(initializer)\n else:\n xp = numpy\n data = initializers.generate_array(initializer, shape, xp)\n grad = xp.full_like(data, numpy.nan)\n super(Parameter, self).__init__(data, name=name, grad=grad)\n\n self.update_rule = None\n\n def __copy__(self):\n return self._copy_to(Parameter())\n\n def __reduce__(self):\n return _recover_parameter, (self.data, self.name, self.grad,\n self.initializer, self.update_rule)\n\n def to_cpu(self):\n super(Parameter, self).to_cpu()\n if self.data is None:\n self._initial_device = None\n\n def to_gpu(self, device=None):\n super(Parameter, self).to_gpu(device)\n if self.data is None:\n if device is None:\n device = cuda.Device().id\n self._initial_device = device\n\n def cleargrad(self):\n super(Parameter, self).cleargrad()\n if self.data is None:\n self._grad_initializer = None\n\n def zerograd(self):\n super(Parameter, self).zerograd()\n if self.data is None:\n dtype = getattr(self.initializer, 'dtype', None)\n self._grad_initializer = initializers.Zero(dtype)\n\n def initialize(self, shape):\n \"\"\"Initializes the uninitialized variable.\n\n Uninitialized variable is a variable created with the data array set to\n None. This method creates and initializes the data array. The shape of\n the variable can be left unknown until this method is called.\n\n Args:\n shape (tuple of int): Shape of the data array.\n\n \"\"\"\n xp = numpy if self._initial_device is None else cuda.cupy\n with cuda.get_device_from_id(self._initial_device):\n data = initializers.generate_array(self.initializer, shape, xp)\n\n ginit = self._grad_initializer\n grad = None if ginit is None else initializers.generate_array(\n ginit, shape, xp)\n\n self._data[0] = data\n self.grad = grad\n\n def update(self):\n \"\"\"Updates the data array using the gradient and the update rule.\n\n This method updates the parameter using the attached update rule.\n\n \"\"\"\n if self.update_rule is not None:\n self.update_rule.update(self)\n\n\ndef _recover_parameter(data, name, grad, initializer, update_rule):\n p = Parameter(initializer=initializer, name=name)\n p.data = data\n p.grad = grad\n p.update_rule = update_rule\n return p\n",
"path": "chainer/variable.py"
}
] | [
{
"content": "import collections\nimport copy\nimport heapq\nimport traceback\nimport warnings\nimport weakref\n\nimport numpy\n\nimport chainer\nfrom chainer import cuda\nfrom chainer import initializers\nfrom chainer.initializers import constant\nfrom chainer.utils import argument\n\n\ndef _check_grad_type(func, x, gx):\n if x.data is None or gx is None:\n # ``x.data is None`` implies that the data array is not retained\n return\n if not isinstance(gx, type(x.data)):\n msg = ('Type of data and grad mismatch\\n%s != %s' %\n (type(x.data), type(gx)))\n typ = TypeError\n elif gx.dtype != x.data.dtype:\n msg = ('Dtype of data and grad mismatch\\n%s != %s' %\n (x.data.dtype, gx.dtype))\n typ = TypeError\n elif gx.shape != x.data.shape:\n msg = ('Shape of data and grad mismatch\\n%s != %s' %\n (x.data.shape, gx.shape))\n typ = ValueError\n else:\n return\n\n detail = ''\n if func:\n detail = 'Function `{0}` ({1}) has a bug.\\n'.format(\n type(func)._impl_name, func.label)\n stack = func.stack\n if stack:\n detail += 'Stacktrace of the function is below:\\n'\n for line in traceback.format_list(func.stack):\n detail += line\n detail += '''\nPlease report this error to the issue tracker with the stack trace,\nthe information of your environment, and your script:\nhttps://github.com/chainer/chainer/issues/new.\n'''.format(type(func).__name__, func.label)\n\n raise typ(detail + msg)\n\n\ndef variable_repr(var):\n \"\"\"Return the string representation of a variable.\n\n Args:\n var (~chainer.Variable): Input Variable.\n .. seealso:: numpy.array_repr\n \"\"\"\n xp = cuda.get_array_module(var)\n if xp is numpy:\n arr = var.data\n else:\n arr = var.data.get()\n\n if var.name:\n prefix = 'variable ' + var.name\n else:\n prefix = 'variable'\n\n if arr is None:\n lst = 'None'\n elif arr.size > 0 or arr.shape == (0,):\n lst = numpy.array2string(arr, None, None, None, ', ', prefix + '(')\n else: # show zero-length shape unless it is (0,)\n lst = '[], shape=%s' % (repr(arr.shape),)\n\n return '%s(%s)' % (prefix, lst)\n\n\ndef variable_str(var):\n \"\"\"Return the string representation of a variable.\n\n Args:\n var (~chainer.Variable): Input Variable.\n .. seealso:: numpy.array_str\n \"\"\"\n xp = cuda.get_array_module(var)\n if xp is numpy:\n arr = var.data\n else:\n arr = var.data.get()\n\n if var.name:\n prefix = 'variable ' + var.name\n else:\n prefix = 'variable'\n\n if arr is None:\n lst = 'None'\n else:\n lst = numpy.array2string(arr, None, None, None, ' ', prefix + '(')\n\n return '%s(%s)' % (prefix, lst)\n\n\nclass VariableNode(object):\n\n \"\"\"Node in the backward computational graph representing a variable.\n\n This object represents a variable node in a computational graph. The node\n is used in error backpropagation (a.k.a. backprop) to determine which\n gradient to be passed to each function.\n\n A variable node is held by the corresponding :class:`Variable` object,\n which is managed by users. :class:`Function` objects that take the variable\n as an input also hold references to the variable node.\n\n Note that the node does not hold a reference to the corresponding data\n array in general. The data array is actually accessible by the node in the\n following cases.\n\n 1. If there exists a :class:`Variable` object that holds a reference to the\n variable node, the variable node holds a weak reference to the variable\n object, and thus the data array is accessible via the weak reference.\n 2. If :meth:`retain_data` is called, the node holds a reference to the data\n array. 
It is mainly called by a function that needs the input or output\n data array in its backprop procedure. See :meth:`Function.retain_inputs`\n and :meth:`Function.retain_outputs` for more details.\n\n Users usually do not need to touch this variable node object. The\n computational graph is automatically managed by Chainer, and any interface\n that is beneficial for users is also provided by :class:`Variable`.\n\n Args:\n variable (Variable): The corresponding variable object.\n name (str): Name of the variable node.\n\n Attributes:\n dtype: Data type of the data array.\n shape: Shape of the data array.\n name (str): Name of the variable node.\n\n \"\"\"\n\n _creator_node = None\n _data = None\n _rank = 0\n # Name of the Function is assigned if this variable is a gradient generated\n # by an old-style Function\n _old_style_grad_generator = None\n\n def __init__(self, variable, name, **kwargs):\n argument.check_unexpected_kwargs(\n kwargs,\n grad='unexpected keyword argument \"grad\": '\n 'pass the gradient to Variable instead'\n )\n self._variable = weakref.ref(variable)\n self.name = name\n self._requires_grad = variable.requires_grad\n\n vdata = variable.data\n self._set_data_type(vdata)\n\n @property\n def creator(self):\n \"\"\"Function object that created this variable node.\n\n When the function is implemented with the old-style API (i.e., it uses\n :class:`Function` class), this property returns the :class:`Function`\n object. The object is extracted from the :class:`FunctionAdapter`\n object, so the returned object is not the function node, but instead\n the actual implementation of forward and backward procedures.\n\n When the function is implemented with the new-style API (i.e., it uses\n :class:`FunctionNode` class), this property returns the function node\n object. In this case, the returned object is same as\n :attr:`creator_node`.\n\n .. warning::\n\n As of v3.0.0, when the creator is an old-style function, the\n following code is invalid:\n\n .. code-block:: python\n\n creator = v.creator\n v.creator = None\n ...\n v.creator = creator\n\n The point is that :class:`FunctionNode` objects are used as nodes\n in the computational graph instead of :class:`Function`, and each\n :class:`Function` object only holds a *weak reference* to the\n corresponding :class:`FunctionNode`. Since ``creator`` returns the\n :class:`Function` object, the :class:`FunctionNode` object is not\n kept by preserving ``creator``.\n\n The above code should be fixed as follows.\n\n .. 
code-block:: python\n\n creator_node = v.creator_node\n v.creator_node = None\n ...\n v.creator_node = creator_node\n\n \"\"\"\n node = self._creator_node\n if node is None:\n return None\n\n if isinstance(node, chainer.function.FunctionAdapter):\n return node.function\n return node\n\n @creator.setter\n def creator(self, func):\n self.creator_node = func\n\n @property\n def creator_node(self):\n \"\"\"Function node that has this variable as an output.\n\n See :class:`FunctionNode` for the definition of a function node.\n\n \"\"\"\n return self._creator_node\n\n @creator_node.setter\n def creator_node(self, func):\n if isinstance(func, chainer.Function):\n func = func.node\n self._creator_node = func\n if func is not None:\n self._rank = func.rank + 1\n\n @property\n def data(self):\n \"\"\"Data array of the corresponding variable.\n\n If the data is not available, it returns ``None``.\n\n \"\"\"\n return self._data\n\n @data.setter\n def data(self, d):\n self._data = d\n self._set_data_type(d)\n\n @property\n def grad(self):\n \"\"\"Gradient array of the corresponding variable.\n\n If the variable is not available, it returns ``None``.\n\n \"\"\"\n var = self.get_variable()\n return None if var is None else var.grad\n\n @property\n def grad_var(self):\n \"\"\"Gradient variable of the corresponding variable.\n\n If the corresponding variable is not available, it return ``None``.\n\n \"\"\"\n var = self.get_variable()\n return None if var is None else var._grad_var\n\n @property\n def label(self):\n \"\"\"Short text that represents the variable node.\"\"\"\n if self.shape == ():\n return str(self.dtype)\n return '(%s), %s' % (', '.join(map(str, self.shape)),\n str(self.dtype))\n\n @property\n def rank(self):\n return self._rank\n\n @property\n def requires_grad(self):\n \"\"\"It indicates that ``grad`` will be set in backward calculation.\"\"\"\n return self._requires_grad\n\n def get_variable(self):\n \"\"\"Returns the corresponding :class:`Variable` object.\n\n VariableNode object holds a weak reference of the variable object. If\n the reference is alive, it is returned by this property. Otherwise,\n this property creates a new :class:`Variable` object from this node\n object and returns it.\n\n Returns:\n Variable: The variable object that refers this node.\n\n \"\"\"\n var = self._variable()\n if var is not None:\n return var\n\n var = Variable(self.data, name=self.name,\n requires_grad=self._requires_grad)\n var._node = self\n return var\n\n def set_creator(self, creator):\n \"\"\"Sets a :class:`Function` object that created this node.\n\n This method is equivalent to ``self.creator = creator``. A\n :class:`FunctionNode` object can also be passed.\n\n Args:\n creator (Function or FunctionNode): Function that has created this\n variable.\n\n \"\"\"\n self.creator = creator\n\n def set_creator_node(self, creator_node):\n \"\"\"Sets a :class:`FunctionNode` object that created this node.\n\n This method is equivalent to ``self.creator_node = creator_node``. 
A\n :class:`Function` object can also be passed, in which case the\n :attr:`~Function.node` object is extracted.\n\n Args:\n creator_node (FunctionNode or Function): Function node that has\n this variable as an output.\n\n \"\"\"\n self.creator_node = creator_node\n\n def unchain(self):\n \"\"\"Deletes the reference to the creator of this variable node.\n\n This method is equivalent to ``self.creator_node = None``.\n\n \"\"\"\n self.creator_node = None\n\n def retain_data(self):\n \"\"\"Lets the node hold a reference to the underlying data array.\n\n This method gets the data array of the corresponding variable and keeps\n it. If the weak reference to the corresponding variable is dead, it\n raises an error.\n\n \"\"\"\n variable = self._variable()\n if variable is not None:\n self.data = variable.data\n else:\n raise RuntimeError('cannot retain variable data: the variable has '\n 'been already released')\n\n def _set_data_type(self, d):\n if d is None:\n self.dtype = None\n self.shape = None\n else:\n self.dtype = d.dtype\n self.shape = d.shape\n\n def _check_old_style_gradient(self):\n if self._old_style_grad_generator is not None:\n raise RuntimeError(\n 'cannot twice-differentiate an old style Function \"%s\"' %\n self._old_style_grad_generator)\n\n\ndef _create_variable(data, name, grad, requires_grad):\n return Variable(\n data, name=name, grad=grad, requires_grad=requires_grad)\n\n\nclass Variable(object):\n\n \"\"\"__init__(data=None, *, name=None, grad=None, requires_grad=True)\n\n Array with a structure to keep track of computation.\n\n Every variable holds a data array of type either :class:`numpy.ndarray` or\n :class:`cupy.ndarray`.\n\n A variable object holds a data array and a :class:`VariableNode` object of\n a computational graph. If the variable is constructed by the user, the node\n is *root* and does not hold any parent. If the variable is constructed by a\n :class:`FunctionNode` object, the node holds a reference to its parent\n called :attr:`creator_node`. This reference is used in backpropagation to\n backtrack the graph.\n\n Users can disable (resp. enable) this chaining behavior by calling\n :func:`~chainer.no_backprop_mode` (resp.\n :func:`~chainer.force_backprop_mode`).\n In the former context, a variable never creates a computational graph,\n whereas in the latter context, it is forced to create.\n\n .. warning::\n\n ``volatile`` argument is not supported anymore since v2.\n Instead, use :func:`chainer.no_backprop_mode`.\n\n Args:\n data (numpy.ndarray or cupy.ndarray): Initial data array.\n name (str): Name of the variable.\n grad (numpy.ndarray or cupy.ndarray): Initial gradient array.\n requires_grad (bool): Boolean indicating whether ``grad`` will be set\n in backward calculation.\n\n Attributes:\n data: Data array of type either :class:`numpy.ndarray` or\n :class:`cupy.ndarray`. If it is None, the variable is left in an\n uninitialized state.\n grad_var (Variable): Gradient variable.\n\n \"\"\" # NOQA\n\n def __init__(self, data=None, **kwargs):\n argument.check_unexpected_kwargs(\n kwargs, volatile='volatile argument is not supported anymore. 
'\n 'Use chainer.using_config')\n name, grad, requires_grad \\\n = argument.parse_kwargs(\n kwargs, ('name', None), ('grad', None),\n ('requires_grad', True))\n\n if (data is not None and\n not isinstance(data, (numpy.ndarray, cuda.ndarray))):\n msg = '''numpy.ndarray or cuda.ndarray are expected.\nActual: {0}'''.format(type(data))\n raise TypeError(msg)\n\n # Use a list as a data structure to hold the data array indirectly to\n # abstract its initialized/uninitialized state.\n self._data = [data]\n self._requires_grad = requires_grad\n self._node = VariableNode(self, name)\n self._grad_var = None if grad is None else Variable(grad)\n\n def __copy__(self):\n return self._copy_to(Variable())\n\n def _copy_to(self, target):\n target.__dict__ = copy.copy(self.__dict__)\n target._node = VariableNode(target, self.name)\n return target\n\n def __reduce__(self):\n return _create_variable, (self.data, self.name, self.grad,\n self._requires_grad)\n\n def __repr__(self):\n return variable_repr(self)\n\n def __str__(self):\n return variable_str(self)\n\n @property\n def name(self):\n return self._node.name\n\n @name.setter\n def name(self, n):\n self._node.name = n\n\n def summary(self):\n if self.name:\n return '<variable %s>' % self.name\n else:\n return '<variable at 0x%x>' % id(self)\n\n def debug_print(self):\n \"\"\"Display a summary of the stored data and location of the Variable\"\"\"\n\n msg = \"\"\"{summary}\n- device: {device}\n- backend: {background}\n- shape: {shape}\n- dtype: {dtype}\n- statistics: {stats}\n- grad: {grad}\"\"\"\n\n stats_msg = 'mean={0:.8f}, std={1:.8f}'\n\n try:\n device = self.data.device\n except AttributeError:\n device = 'CPU'\n\n with cuda.get_device_from_array(self.data) as dev:\n xp = numpy if int(dev) == -1 else cuda.cupy\n\n if self.grad is None:\n grad = None\n elif xp.all(self.grad == 0):\n grad = 0\n else:\n grad = stats_msg.format(float(xp.mean(self.grad)),\n float(xp.std(self.grad)))\n\n stats = stats_msg.format(float(xp.mean(self.data)),\n float(xp.std(self.data)))\n\n return msg.format(summary=self.summary(),\n grad=grad, shape=self.data.shape,\n background=type(self.data),\n dtype=self.data.dtype, device=device,\n stats=stats)\n\n def __pos__(self):\n return self\n\n def __len__(self):\n \"\"\"Returns the first dimension of the data array.\n\n Returns:\n int: Number of the first dimension of the data array.\n\n \"\"\"\n return len(self.data)\n\n @property\n def label(self):\n \"\"\"Short text that represents the variable.\"\"\"\n return self._node.label\n\n @property\n def creator(self):\n \"\"\"Function implementation that created this variable.\n\n When this variable has been created by an old-style function (i.e., it\n is implemented as a subclass of :class:`Function`), this property\n returns that :class:`Function` object.\n\n When this variable has been created by a new-style function (i.e., it\n is implemented as a subclass of :class:`FunctionNode` class), this\n property returns that node object.\n\n \"\"\"\n return self._node.creator\n\n @creator.setter\n def creator(self, func):\n self._node.creator = func\n\n @property\n def creator_node(self):\n \"\"\":class:`FunctionNode` object that created this variable.\n\n This property has a setter to which ``None`` can be set. Setting\n ``None`` to this property is equivalent to call :meth:`unchain`;\n it purges the variable from the function that created this variable.\n\n The setter also accepts the original :class:`FunctionNode` object that\n created this variable. 
For example, you can once set ``None`` to this\n property and then set the original value again.\n\n .. note::\n Setting an irrelevant :meth:`FunctionNode` object does not emit any\n error immediately, whereas the behavior is undefined. Do not set\n a :meth:`FunctionNode` object that did not create this variable\n object.\n\n \"\"\"\n return self._node._creator_node\n\n @creator_node.setter\n def creator_node(self, func):\n self._node.creator_node = func\n\n @property\n def data(self):\n return self._data[0]\n\n @data.setter\n def data(self, d):\n self._data[0] = d\n self._node._set_data_type(d)\n\n @property\n def grad(self):\n \"\"\"Gradient array of this variable.\n\n Not that this property returns the underlying array of the gradient\n variable instead of the gradient variable itself; to get/set\n gradient variable, use :attr:`grad_var` instead.\n\n \"\"\"\n gv = self._grad_var\n return None if gv is None else gv.data\n\n @grad.setter\n def grad(self, g):\n self.grad_var = None if g is None else Variable(g)\n\n @property\n def grad_var(self):\n return self._grad_var\n\n @grad_var.setter\n def grad_var(self, g):\n if g is not None:\n _check_grad_type(None, self, g.data)\n self._grad_var = g\n\n @property\n def shape(self):\n return self.data.shape\n\n @property\n def ndim(self):\n return self.data.ndim\n\n @property\n def size(self):\n return self.data.size\n\n @property\n def dtype(self):\n return self.data.dtype\n\n @property\n def rank(self):\n return self._node.rank\n\n @property\n def node(self):\n return self._node\n\n @property\n def requires_grad(self):\n \"\"\"It indicates that ``grad`` will be set in backward calculation.\"\"\"\n return self._requires_grad\n\n def to_cpu(self):\n \"\"\"Copies the data and gradient arrays to CPU.\"\"\"\n if self.data is None:\n return\n\n self._data = [cuda.to_cpu(self.data)]\n if self._grad_var is not None:\n self._grad_var.to_cpu()\n # ensure that the node tracks the device migration\n node = self._node\n if node._data is not None:\n node.retain_data()\n\n def to_gpu(self, device=None):\n \"\"\"Copies the data and gradient arrays to specified GPU.\n\n Args:\n device: Target device specifier. If omitted, the current device is\n used.\n\n \"\"\"\n if self.data is None:\n self._initial_device = (cuda.Device().id\n if device is None else device)\n else:\n self._data = [cuda.to_gpu(self.data, device)]\n if self._grad_var is not None:\n self._grad_var.to_gpu(device)\n # ensure that the node tracks the device migration\n node = self._node\n if node._data is not None:\n node.retain_data()\n\n def cleargrad(self):\n \"\"\"Clears the gradient array.\"\"\"\n self._grad_var = None\n\n def zerograd(self):\n \"\"\"Initializes the gradient array by zeros.\n\n Note that the gradient variable is unchained from the computational\n graph by this method because this operation breaks the backprop\n validity.\n\n .. deprecated:: v1.15\n Use :meth:`cleargrad` instead.\n\n \"\"\"\n warnings.warn(\n 'Variable.zerograd is deprecated. 
Use Variable.cleargrad instead.',\n DeprecationWarning)\n\n if self.data is None:\n return\n\n with cuda.get_device_from_array(self.data) as dev:\n gv = self._grad_var\n if gv is None:\n xp = numpy if dev.id == -1 else cuda.cupy\n self.grad = xp.zeros_like(self.data)\n else:\n gv.unchain()\n gv.data.fill(0)\n\n def copydata(self, var):\n \"\"\"Copies the data array from given source variable.\n\n This method copies the data array from given variable to this variable.\n The copy is done even if the arrays reside on different devices,\n including across the host and a GPU device. If this variable has an\n uninitialized data array, this method initializes it by the data array\n of the given variable. Similarly, if the given variable has an\n uninitialized data array, this method initializes it by the data array\n of this variable (``self``). If both are uninitialized, this method\n does nothing.\n\n Args:\n var (Variable): Source variable.\n\n \"\"\"\n src = var.data\n dst = self.data\n if src is None:\n if dst is None:\n return\n var.initialize(self.shape)\n src = var.data\n elif dst is None:\n self.initialize(src.shape)\n dst = self.data\n src_xp = cuda.get_array_module(src)\n dst_xp = cuda.get_array_module(dst)\n if dst_xp is src_xp:\n dst_xp.copyto(dst, src)\n elif dst_xp is numpy:\n dst_xp.copyto(dst, src.get())\n else:\n dst.set(src)\n\n def addgrad(self, var):\n \"\"\"Accumulates the gradient array from given source variable.\n\n This method adds the gradient of a given variable to the gradient of\n this variable. The accumulation is even done across the host and\n different devices. If this variable has uninitialized data/grad arrays,\n this method initializes it with the shape of the given variable and\n then accumulates the gradient.\n\n Args:\n var (Variable): Source variable.\n\n \"\"\"\n src = var._grad_var\n if src is None:\n return\n\n if self.data is None:\n self.initialize(var.shape)\n dst = self._grad_var\n\n src_dev = cuda.get_device_from_array(src.data)\n dst_dev = cuda.get_device_from_array(self.data)\n\n if src_dev.id != dst_dev.id:\n src = chainer.functions.copy(src, dst_dev.id)\n self._grad_var = src if dst is None else src + dst\n\n def set_creator(self, gen_func):\n \"\"\"Notifies the variable that the given function is its creator.\n\n Args:\n gen_func (Function): Function object that creates this variable as\n one of its outputs.\n\n \"\"\"\n self._node.set_creator(gen_func)\n\n def set_creator_node(self, fnode):\n \"\"\"Notifies the variable that the given node is its creator.\n\n Args:\n fnode (FunctionNode): Function node that has this variable as an\n output.\n\n \"\"\"\n self._node.set_creator_node(fnode)\n\n def backward(self, retain_grad=False):\n \"\"\"Runs error backpropagation (a.k.a. backprop) from this variable.\n\n On backprop, :meth:`FunctionNode.backward` is called on each\n :class:`FunctionNode` object appearing in the backward graph starting\n from this variable. The backward graph is represented by backward\n references from variable nodes to their creators, and from function\n nodes to their input variable nodes. The backprop stops at all root\n nodes. Some function nodes set ``None`` as gradients of some inputs,\n where further backprop does not take place at such inputs.\n\n This method uses :data:`grad` as the initial error array. User can\n manually set a gradient array before calling this method. 
If\n :data:`data` contains only one element (i.e., it is scalar) and\n :data:`grad` is ``None``, then this method automatically complements\n 1.0 as the initial error. This is useful on starting backprop from\n some scalar loss value.\n\n Note that this method does not support *differentiable backprop*. Use\n :func:`grad` to compute the gradient of gradients.\n\n Args:\n retain_grad (bool): If ``True``, the gradient arrays of all\n intermediate variables are kept. Otherwise, :data:`grad` of the\n intermediate variables are set to ``None`` on appropriate\n timing, which may reduce the maximum memory consumption.\n\n In most cases of training some models, the purpose of backprop\n is to compute gradients of parameters, not of all variables,\n and therefore it is recommended to set this flag ``False``.\n\n \"\"\"\n self._node._check_old_style_gradient()\n if self.creator_node is None:\n return\n initial_device = None\n if cuda.available and isinstance(self.data, cuda.cupy.ndarray):\n try:\n initial_device = cuda.Device()\n except cuda.cupy.cuda.runtime.CUDARuntimeError as e:\n if e.status != 38: # cudaErrorNoDevice\n raise\n\n is_debug = chainer.is_debug()\n\n cand_funcs = []\n seen_set = set()\n grads = {}\n\n # Initialize error by 1, if this is a loss variable\n if self.data.size == 1 and self._grad_var is None:\n with cuda.get_device_from_array(self.data) as device:\n if device is cuda.DummyDevice:\n self.grad = numpy.ones_like(self.data)\n else:\n self.grad = cuda.cupy.ones_like(self.data)\n grads[self._node] = self._grad_var\n\n def add_cand(cand):\n if cand not in seen_set:\n # Negate since heapq is min-heap\n heapq.heappush(cand_funcs, (-cand.rank, len(seen_set), cand))\n seen_set.add(cand)\n\n add_cand(self.creator_node)\n\n def get_grad(node):\n if node is None:\n return None\n if node in grads:\n return grads[node]\n return node.grad_var\n\n while cand_funcs:\n _, _, func = heapq.heappop(cand_funcs)\n inputs = func.inputs\n outputs = [y() for y in func.outputs] # access via weak ref\n\n in_data = tuple([x.data for x in inputs])\n out_grad = tuple([get_grad(y) for y in outputs])\n out_grad_data = tuple(\n [None if g is None else g.data for g in out_grad])\n hooks = chainer.get_function_hooks()\n if func._n_local_function_hooks != 0:\n hooks = collections.OrderedDict(hooks)\n hooks.update(func.local_function_hooks)\n hooks = hooks.values() # avoid six for performance\n\n cuda.get_device_from_array(*in_data).use()\n for hook in hooks:\n hook.backward_preprocess(func, in_data, out_grad_data)\n\n # Collect the current input gradients.\n #\n # Note (Tokui): When the same variable is passed to multiple input\n # slots (e.g. an expression like ``f(x, x)``), it makes the\n # gradient accumulation complicated since the back-propagated\n # gradients w.r.t. the first and second argument should be\n # accumulated to the current gradient w.r.t. 
the same variable.\n # In this case, the current implementation passes the current\n # gradient only to the first occurrence of the variable in the\n # input tuple and passes ``None`` to the rest of the occurrences.\n # For example, when the input variables are ``(x, x)``, the\n # input gradient passed to the ``backward_accumulate`` method is\n # ``(gx, None)`` where ``gx`` is the current gradient of ``x``.\n # See also the docstring of ``FunctionNode.backward_accumulate``.\n target_input_indexes = [\n i for i, x in enumerate(inputs) if x.requires_grad\n ]\n target_inputs = [inputs[i] for i in target_input_indexes]\n in_grad = []\n for i, index_i in enumerate(target_input_indexes):\n x = inputs[index_i]\n if x in target_inputs[:i]:\n # Pass ``None`` for duplicated input variables except for\n # the first occurrence (see the comment above).\n gx = None\n elif x in grads:\n gx = grads[x]\n elif x.creator_node is None:\n x._check_old_style_gradient()\n # accumulate the gradient only if the node is a leaf\n gx = x.grad_var\n else:\n gx = None\n in_grad.append(gx)\n\n gxs = func.backward_accumulate(\n target_input_indexes, out_grad, in_grad)\n\n assert len(gxs) == len(in_grad)\n for hook in hooks:\n hook.backward_postprocess(func, in_data, out_grad_data)\n\n if is_debug:\n for gx in gxs:\n if gx is None:\n continue\n gx_data = gx.data\n cuda.get_device_from_array(gx_data).use()\n if cuda.get_array_module(gx_data).isnan(gx_data).any():\n msg = 'NaN is detected on backward computation'\n raise RuntimeError(msg)\n\n if not retain_grad:\n for y in outputs:\n if y is not None and y is not self.node:\n grads[y] = None\n y_var = y.get_variable()\n if y_var is not None:\n y_var._grad_var = None\n\n for i, gx in enumerate(gxs):\n if gx is None:\n continue\n\n x = target_inputs[i]\n if not x.requires_grad:\n continue\n\n _check_grad_type(func, x, gx.data)\n\n if x in target_inputs[:i]:\n # Accumulate the duplicated gradients here. See the comment\n # above the code that builds ``in_grad``.\n cur_gx = grads[x]\n grads[x] = gx if cur_gx is None else gx + cur_gx\n else:\n grads[x] = gx\n\n x_var = x.get_variable()\n if x_var is not None:\n x_var._grad_var = grads[x]\n\n if x.creator_node is not None:\n add_cand(x.creator_node)\n\n del gxs # to reduce memory usage\n if initial_device is not None:\n initial_device.use()\n\n def reshape(self, *shape):\n \"\"\"Returns a variable of a different shape and the same content.\n\n .. seealso::\n :func:`chainer.functions.reshape` for full documentation,\n\n \"\"\"\n if len(shape) == 1 and isinstance(shape[0], (tuple, list)):\n shape = shape[0]\n return chainer.functions.reshape(self, shape)\n\n def transpose(self, *axes):\n \"\"\"Permute the dimensions of an input variable without copy.\n\n .. seealso::\n :func:`chainer.functions.transpose` for full documentation.\n\n \"\"\"\n if len(axes) == 0:\n axes = None\n elif len(axes) == 1 and (isinstance(axes[0], (tuple, list)) or\n axes[0] is None):\n axes = axes[0]\n return chainer.functions.transpose(self, axes)\n\n def unchain(self):\n \"\"\"Deletes the reference to the creator of this variable.\n\n This method deletes the reference to the creator from the corresponding\n variable node. 
Unlike :meth:`unchain_backward`, it does not backtrack\n the graph.\n\n This method is equivalent to ``self.creator_node = None``.\n\n \"\"\"\n self.creator_node = None\n\n def unchain_backward(self):\n \"\"\"Deletes references between variable nodes and functions backward.\n\n After this method completes, intermediate variable nodes and functions\n that are not referenced from anywhere are deallocated by reference\n count GC. Also this variable itself deletes the reference to its\n creator function from the node, i.e. the node becomes root in the\n computation graph. It indicates that backprop after unchaining stops at\n this variable. This behavior is useful to implement truncated BPTT.\n\n \"\"\"\n cand_funcs = []\n seen_set = set()\n\n def add_cand(cand):\n if cand is not None and cand not in seen_set:\n cand_funcs.append(cand)\n seen_set.add(cand)\n\n add_cand(self.creator_node)\n\n while cand_funcs:\n func = cand_funcs.pop()\n for var in func.inputs:\n add_cand(var.creator_node)\n func.unchain()\n\n def retain_data(self):\n \"\"\"Lets the corresponding variable node keep the underlying array.\"\"\"\n self._node.data = self._data[0]\n\n def __lt__(self, other):\n raise NotImplementedError()\n\n def __le__(self, other):\n raise NotImplementedError()\n\n def __eq__(self, other):\n raise NotImplementedError()\n\n def __ne__(self, other):\n raise NotImplementedError()\n\n def __gt__(self, other):\n raise NotImplementedError()\n\n def __ge__(self, other):\n raise NotImplementedError()\n\n def __nonzero__(self):\n raise NotImplementedError()\n\n def __bool__(self):\n raise NotImplementedError()\n\n __array_priority__ = 200\n __hash__ = None\n\n\nclass Parameter(Variable):\n\n \"\"\"Parameter variable that can be registered to a link.\n\n Parameter is a subclass of :class:`Variable`. It almost behaves as same\n as a usual variable except that a parameter can be registered to a\n :class:`~chainer.Link` object just by assigning it to an attribute of\n the link within an :meth:`~chainer.Link.init_scope` context.\n\n Parameter also supports an initialization by an initializer. It can have\n two initializers: one for the data array, and the other for the gradient\n array. The initializer only specifies the way of filling the elements of\n these arrays, and the shape information is specified at the initialization\n point.\n\n When a link that the parameter has been registered to is passed to an\n :class:`~chainer.GradientMethod`, an update rule is set to the parameter.\n This update rule specifies how to update the data array of the parameter\n using its gradient array.\n\n Args:\n initializer (~chainer.Initializer or numpy.ndarray or cupy.ndarray):\n Initializer of the data array. If ``shape`` is given, this\n initializer is immediately used to initialize the data array.\n Otherwise, if it is an array, it is immediately used as the data\n array, and otherwise the data array is left uninitialized and will\n be initialized by this initializer in :meth:`initialize`. It can\n also be a scalar, in which case the data array will be filled by\n this scalar. Note that float32 is used in this case.\n shape (int or tuple of int or None): Shape of the parameter. If it is\n ``None``, the initialization is deferred to the call of\n :meth:`initialize`.\n name (str): Name of the parameter.\n\n Attributes:\n initializer: Initializer of the data array. 
It is used for\n initializing the data array of an uninitialized variable.\n update_rule: :class:`~chainer.optimizer.UpdateRule` instance that\n updates this variable as a parameter. This argument is set to\n :attr:`update_rule`.\n\n \"\"\"\n\n initializer = None\n _grad_initializer = None\n _initial_device = None\n\n def __init__(self, initializer=None, shape=None, name=None):\n if initializer is None:\n initializer = constant.NaN()\n elif numpy.isscalar(initializer):\n initializer = constant.Constant(initializer)\n if shape is None:\n if isinstance(initializer, (numpy.ndarray, cuda.ndarray)):\n # parameter initialized by the initial array\n super(Parameter, self).__init__(initializer, name=name)\n else:\n # uninitialized parameter\n super(Parameter, self).__init__(name=name)\n self.initializer = initializer\n dtype = getattr(initializer, 'dtype', numpy.float32)\n self._grad_initializer = constant.NaN(dtype)\n else:\n # parameter initialized with a given shape\n if isinstance(initializer, (numpy.ndarray, cuda.ndarray)):\n xp = cuda.get_array_module(initializer)\n initializer = constant.Constant(initializer)\n else:\n xp = numpy\n data = initializers.generate_array(initializer, shape, xp)\n grad = xp.full_like(data, numpy.nan)\n super(Parameter, self).__init__(data, name=name, grad=grad)\n\n self.update_rule = None\n\n def __copy__(self):\n return self._copy_to(Parameter())\n\n def __reduce__(self):\n return _recover_parameter, (self.data, self.name, self.grad,\n self.initializer, self.update_rule)\n\n def to_cpu(self):\n super(Parameter, self).to_cpu()\n if self.data is None:\n self._initial_device = None\n\n def to_gpu(self, device=None):\n super(Parameter, self).to_gpu(device)\n if self.data is None:\n if device is None:\n device = cuda.Device().id\n self._initial_device = device\n\n def cleargrad(self):\n super(Parameter, self).cleargrad()\n if self.data is None:\n self._grad_initializer = None\n\n def zerograd(self):\n super(Parameter, self).zerograd()\n if self.data is None:\n dtype = getattr(self.initializer, 'dtype', None)\n self._grad_initializer = initializers.Zero(dtype)\n\n def initialize(self, shape):\n \"\"\"Initializes the uninitialized variable.\n\n Uninitialized variable is a variable created with the data array set to\n None. This method creates and initializes the data array. The shape of\n the variable can be left unknown until this method is called.\n\n Args:\n shape (tuple of int): Shape of the data array.\n\n \"\"\"\n xp = numpy if self._initial_device is None else cuda.cupy\n with cuda.get_device_from_id(self._initial_device):\n data = initializers.generate_array(self.initializer, shape, xp)\n\n ginit = self._grad_initializer\n grad = None if ginit is None else initializers.generate_array(\n ginit, shape, xp)\n\n self._data[0] = data\n self.grad = grad\n\n def update(self):\n \"\"\"Updates the data array using the gradient and the update rule.\n\n This method updates the parameter using the attached update rule.\n\n \"\"\"\n if self.update_rule is not None:\n self.update_rule.update(self)\n\n\ndef _recover_parameter(data, name, grad, initializer, update_rule):\n p = Parameter(initializer=initializer, name=name)\n p.data = data\n p.grad = grad\n p.update_rule = update_rule\n return p\n",
"path": "chainer/variable.py"
}
] | diff --git a/chainer/variable.py b/chainer/variable.py
index c66a3fc6ba07..b041411d2ae6 100644
--- a/chainer/variable.py
+++ b/chainer/variable.py
@@ -1058,10 +1058,8 @@ def __nonzero__(self):
def __bool__(self):
raise NotImplementedError()
- def __hash__(self):
- return super(Variable, self).__hash__()
-
__array_priority__ = 200
+ __hash__ = None
class Parameter(Variable):
diff --git a/tests/chainer_tests/test_variable.py b/tests/chainer_tests/test_variable.py
index c53739d12ebf..9f61f25445f2 100644
--- a/tests/chainer_tests/test_variable.py
+++ b/tests/chainer_tests/test_variable.py
@@ -665,6 +665,46 @@ def test_pickle_gpu(self):
cp.testing.assert_array_equal(x.grad, d.grad)
+class TestVariableBasic(unittest.TestCase):
+ def test_unhashable(self):
+ a = chainer.Variable(np.ones((2,)))
+ with six.assertRaisesRegex(self, TypeError, '^unhashable type: '):
+ hash(a)
+
+ def test_unequatable(self):
+ a = chainer.Variable(np.ones((2,)))
+ b = chainer.Variable(np.ones((2,)))
+ with self.assertRaises(NotImplementedError):
+ a == b
+ with self.assertRaises(NotImplementedError):
+ a == a
+ with self.assertRaises(NotImplementedError):
+ a != b
+ with self.assertRaises(NotImplementedError):
+ a != a
+
+ def test_uncomparable(self):
+ a = chainer.Variable(np.ones((2,)))
+ b = chainer.Variable(np.ones((2,)))
+ with self.assertRaises(NotImplementedError):
+ a < b
+ with self.assertRaises(NotImplementedError):
+ a <= b
+ with self.assertRaises(NotImplementedError):
+ a > b
+ with self.assertRaises(NotImplementedError):
+ a >= b
+
+ def test_bool_inconvertible(self):
+ a = chainer.Variable(np.ones((2,)))
+ with self.assertRaises(NotImplementedError):
+ if a:
+ pass
+ with self.assertRaises(NotImplementedError):
+ if not a:
+ pass
+
+
class TestParameter(unittest.TestCase):
def setUp(self):
|
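The `__hash__ = None` assignment in the diff above is the standard Python idiom for making instances explicitly unhashable, consistent with `Variable.__eq__` raising `NotImplementedError`; the new `test_unhashable` test asserts exactly this. A minimal self-contained illustration:

```python
class Unhashable:
    __hash__ = None  # hashing instances now raises TypeError


try:
    hash(Unhashable())
except TypeError as exc:
    print(exc)  # unhashable type: 'Unhashable'
```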
inventree__InvenTree-5627 | Stocktake doesn't save parts with no stock
### Please verify that this bug has NOT been raised before.
- [X] I checked and didn't find a similar issue
### Describe the bug*
Stocktake is ignoring active parts with 0 stock. (see https://github.com/inventree/InvenTree/blob/master/InvenTree/part/stocktake.py#L252-L254)
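For reference, this is the relevant loop in `generate_stocktake_report` (quoted from `InvenTree/part/stocktake.py`); any result with zero total quantity is dropped before it is saved or added to the report:

```python
# Iterate through each Part which matches the filters above
for p in parts:

    # Create a new stocktake for this part (do not commit, this will take place later on)
    stocktake = perform_stocktake(
        p, user, commit=False,
        exclude_external=exclude_external,
        location=location,
    )

    if stocktake.quantity == 0:
        # Skip rows with zero total quantity
        continue
```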
### Steps to Reproduce
1. Add a Part
2. Give it some Stock
3. Run stocktake
4. Sell all the Stock
5. Run stocktake again
6. In the Part's stocktake you'll see no new ("0" quantity) entry
### Expected behaviour
If I have an active part and I run stocktake, I expect the Part to be recorded with a "0 stock" entry at that date and time.
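With that skip removed, a zero-stock part should still yield a `PartStocktake` row. A minimal sketch of the expected behaviour, assuming a hypothetical active `Part` fixture with no stock items:

```python
from part.stocktake import perform_stocktake

# `part_with_no_stock` is a hypothetical fixture, not real test data
stocktake = perform_stocktake(part_with_no_stock, user=None, commit=False)
assert stocktake.quantity == 0  # recorded as a "0 stock" entry instead of skipped
```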
### Deployment Method
- [X] Docker
- [ ] Bare metal
### Version Information
# Version Information:
InvenTree-Version: 0.13.0 dev
Django Version: 3.2.21
Commit Hash: 2b0d81f
Commit Date: 2023-09-25
Database: postgresql
Debug-Mode: False
Deployed using Docker: True
Platform: Linux-5.15.0-82-generic-x86_64-with
Installer: DOC
Active plugins: False
### Please verify if you can reproduce this bug on the demo site.
- [X] I can reproduce this bug on the demo site.
### Relevant log output
_No response_
| [
{
"content": "\"\"\"Stocktake report functionality\"\"\"\n\nimport io\nimport logging\nimport time\nfrom datetime import datetime\n\nfrom django.contrib.auth.models import User\nfrom django.core.files.base import ContentFile\nfrom django.utils.translation import gettext_lazy as _\n\nimport tablib\nfrom djmoney.contrib.exchange.models import convert_money\nfrom djmoney.money import Money\n\nimport common.models\nimport InvenTree.helpers\nimport part.models\nimport stock.models\n\nlogger = logging.getLogger('inventree')\n\n\ndef perform_stocktake(target: part.models.Part, user: User, note: str = '', commit=True, **kwargs):\n \"\"\"Perform stocktake action on a single part.\n\n arguments:\n target: A single Part model instance\n commit: If True (default) save the result to the database\n user: User who requested this stocktake\n\n kwargs:\n exclude_external: If True, exclude stock items in external locations (default = False)\n location: Optional StockLocation to filter results for generated report\n\n Returns:\n PartStocktake: A new PartStocktake model instance (for the specified Part)\n\n Note that while we record a *total stocktake* for the Part instance which gets saved to the database,\n the user may have requested a stocktake limited to a particular location.\n\n In this case, the stocktake *report* will be limited to the specified location.\n \"\"\"\n\n # Determine which locations are \"valid\" for the generated report\n location = kwargs.get('location', None)\n locations = location.get_descendants(include_self=True) if location else []\n\n # Grab all \"available\" stock items for the Part\n # We do not include variant stock when performing a stocktake,\n # otherwise the stocktake entries will be duplicated\n stock_entries = target.stock_entries(in_stock=True, include_variants=False)\n\n exclude_external = kwargs.get('exclude_external', False)\n\n if exclude_external:\n stock_entries = stock_entries.exclude(location__external=True)\n\n # Cache min/max pricing information for this Part\n pricing = target.pricing\n\n if not pricing.is_valid:\n # If pricing is not valid, let's update\n logger.info(\"Pricing not valid for %s - updating\", target)\n pricing.update_pricing(cascade=False)\n pricing.refresh_from_db()\n\n base_currency = common.settings.currency_code_default()\n\n # Keep track of total quantity and cost for this part\n total_quantity = 0\n total_cost_min = Money(0, base_currency)\n total_cost_max = Money(0, base_currency)\n\n # Separately, keep track of stock quantity and value within the specified location\n location_item_count = 0\n location_quantity = 0\n location_cost_min = Money(0, base_currency)\n location_cost_max = Money(0, base_currency)\n\n for entry in stock_entries:\n\n entry_cost_min = None\n entry_cost_max = None\n\n # Update price range values\n if entry.purchase_price:\n entry_cost_min = entry.purchase_price\n entry_cost_max = entry.purchase_price\n\n else:\n # If no purchase price is available, fall back to the part pricing data\n entry_cost_min = pricing.overall_min or pricing.overall_max\n entry_cost_max = pricing.overall_max or pricing.overall_min\n\n # Convert to base currency\n try:\n entry_cost_min = convert_money(entry_cost_min, base_currency) * entry.quantity\n entry_cost_max = convert_money(entry_cost_max, base_currency) * entry.quantity\n except Exception:\n\n entry_cost_min = Money(0, base_currency)\n entry_cost_max = Money(0, base_currency)\n\n # Update total cost values\n total_quantity += entry.quantity\n total_cost_min += entry_cost_min\n 
total_cost_max += entry_cost_max\n\n # Test if this stock item is within the specified location\n if location and entry.location not in locations:\n continue\n\n # Update location cost values\n location_item_count += 1\n location_quantity += entry.quantity\n location_cost_min += entry_cost_min\n location_cost_max += entry_cost_max\n\n # Construct PartStocktake instance\n # Note that we use the *total* values for the PartStocktake instance\n instance = part.models.PartStocktake(\n part=target,\n item_count=stock_entries.count(),\n quantity=total_quantity,\n cost_min=total_cost_min,\n cost_max=total_cost_max,\n note=note,\n user=user,\n )\n\n if commit:\n instance.save()\n\n # Add location-specific data to the instance\n instance.location_item_count = location_item_count\n instance.location_quantity = location_quantity\n instance.location_cost_min = location_cost_min\n instance.location_cost_max = location_cost_max\n\n return instance\n\n\ndef generate_stocktake_report(**kwargs):\n \"\"\"Generated a new stocktake report.\n\n Note that this method should be called only by the background worker process!\n\n Unless otherwise specified, the stocktake report is generated for *all* Part instances.\n Optional filters can by supplied via the kwargs\n\n kwargs:\n user: The user who requested this stocktake (set to None for automated stocktake)\n part: Optional Part instance to filter by (including variant parts)\n category: Optional PartCategory to filter results\n location: Optional StockLocation to filter results\n exclude_external: If True, exclude stock items in external locations (default = False)\n generate_report: If True, generate a stocktake report from the calculated data (default=True)\n update_parts: If True, save stocktake information against each filtered Part (default = True)\n \"\"\"\n\n # Determine if external locations should be excluded\n exclude_external = kwargs.get(\n 'exclude_exernal',\n common.models.InvenTreeSetting.get_setting('STOCKTAKE_EXCLUDE_EXTERNAL', False)\n )\n\n parts = part.models.Part.objects.all()\n user = kwargs.get('user', None)\n\n generate_report = kwargs.get('generate_report', True)\n update_parts = kwargs.get('update_parts', True)\n\n # Filter by 'Part' instance\n if p := kwargs.get('part', None):\n variants = p.get_descendants(include_self=True)\n parts = parts.filter(\n pk__in=[v.pk for v in variants]\n )\n\n # Filter by 'Category' instance (cascading)\n if category := kwargs.get('category', None):\n categories = category.get_descendants(include_self=True)\n parts = parts.filter(category__in=categories)\n\n # Filter by 'Location' instance (cascading)\n # Stocktake report will be limited to parts which have stock items within this location\n if location := kwargs.get('location', None):\n # Extract flat list of all sublocations\n locations = list(location.get_descendants(include_self=True))\n\n # Items which exist within these locations\n items = stock.models.StockItem.objects.filter(location__in=locations)\n\n if exclude_external:\n items = items.exclude(location__external=True)\n\n # List of parts which exist within these locations\n unique_parts = items.order_by().values('part').distinct()\n\n parts = parts.filter(\n pk__in=[result['part'] for result in unique_parts]\n )\n\n # Exit if filters removed all parts\n n_parts = parts.count()\n\n if n_parts == 0:\n logger.info(\"No parts selected for stocktake report - exiting\")\n return\n\n logger.info(\"Generating new stocktake report for %s parts\", n_parts)\n\n base_currency = 
common.settings.currency_code_default()\n\n # Construct an initial dataset for the stocktake report\n dataset = tablib.Dataset(\n headers=[\n _('Part ID'),\n _('Part Name'),\n _('Part Description'),\n _('Category ID'),\n _('Category Name'),\n _('Stock Items'),\n _('Total Quantity'),\n _('Total Cost Min') + f' ({base_currency})',\n _('Total Cost Max') + f' ({base_currency})',\n ]\n )\n\n parts = parts.prefetch_related('category', 'stock_items')\n\n # Simple profiling for this task\n t_start = time.time()\n\n # Keep track of each individual \"stocktake\" we perform.\n # They may be bulk-commited to the database afterwards\n stocktake_instances = []\n\n total_parts = 0\n\n # Iterate through each Part which matches the filters above\n for p in parts:\n\n # Create a new stocktake for this part (do not commit, this will take place later on)\n stocktake = perform_stocktake(\n p, user, commit=False,\n exclude_external=exclude_external,\n location=location,\n )\n\n if stocktake.quantity == 0:\n # Skip rows with zero total quantity\n continue\n\n total_parts += 1\n\n stocktake_instances.append(stocktake)\n\n # Add a row to the dataset\n dataset.append([\n p.pk,\n p.full_name,\n p.description,\n p.category.pk if p.category else '',\n p.category.name if p.category else '',\n stocktake.location_item_count,\n stocktake.location_quantity,\n InvenTree.helpers.normalize(stocktake.location_cost_min.amount),\n InvenTree.helpers.normalize(stocktake.location_cost_max.amount),\n ])\n\n # Save a new PartStocktakeReport instance\n buffer = io.StringIO()\n buffer.write(dataset.export('csv'))\n\n today = datetime.now().date().isoformat()\n filename = f\"InvenTree_Stocktake_{today}.csv\"\n report_file = ContentFile(buffer.getvalue(), name=filename)\n\n if generate_report:\n report_instance = part.models.PartStocktakeReport.objects.create(\n report=report_file,\n part_count=total_parts,\n user=user\n )\n\n # Notify the requesting user\n if user:\n\n common.notifications.trigger_notification(\n report_instance,\n category='generate_stocktake_report',\n context={\n 'name': _('Stocktake Report Available'),\n 'message': _('A new stocktake report is available for download'),\n },\n targets=[\n user,\n ]\n )\n\n # If 'update_parts' is set, we save stocktake entries for each individual part\n if update_parts:\n # Use bulk_create for efficient insertion of stocktake\n part.models.PartStocktake.objects.bulk_create(\n stocktake_instances,\n batch_size=500,\n )\n\n t_stocktake = time.time() - t_start\n logger.info(\"Generated stocktake report for %s parts in %ss\", total_parts, round(t_stocktake, 2))\n",
"path": "InvenTree/part/stocktake.py"
}
] | [
{
"content": "\"\"\"Stocktake report functionality\"\"\"\n\nimport io\nimport logging\nimport time\nfrom datetime import datetime\n\nfrom django.contrib.auth.models import User\nfrom django.core.files.base import ContentFile\nfrom django.utils.translation import gettext_lazy as _\n\nimport tablib\nfrom djmoney.contrib.exchange.models import convert_money\nfrom djmoney.money import Money\n\nimport common.models\nimport InvenTree.helpers\nimport part.models\nimport stock.models\n\nlogger = logging.getLogger('inventree')\n\n\ndef perform_stocktake(target: part.models.Part, user: User, note: str = '', commit=True, **kwargs):\n \"\"\"Perform stocktake action on a single part.\n\n arguments:\n target: A single Part model instance\n commit: If True (default) save the result to the database\n user: User who requested this stocktake\n\n kwargs:\n exclude_external: If True, exclude stock items in external locations (default = False)\n location: Optional StockLocation to filter results for generated report\n\n Returns:\n PartStocktake: A new PartStocktake model instance (for the specified Part)\n\n Note that while we record a *total stocktake* for the Part instance which gets saved to the database,\n the user may have requested a stocktake limited to a particular location.\n\n In this case, the stocktake *report* will be limited to the specified location.\n \"\"\"\n\n # Determine which locations are \"valid\" for the generated report\n location = kwargs.get('location', None)\n locations = location.get_descendants(include_self=True) if location else []\n\n # Grab all \"available\" stock items for the Part\n # We do not include variant stock when performing a stocktake,\n # otherwise the stocktake entries will be duplicated\n stock_entries = target.stock_entries(in_stock=True, include_variants=False)\n\n exclude_external = kwargs.get('exclude_external', False)\n\n if exclude_external:\n stock_entries = stock_entries.exclude(location__external=True)\n\n # Cache min/max pricing information for this Part\n pricing = target.pricing\n\n if not pricing.is_valid:\n # If pricing is not valid, let's update\n logger.info(\"Pricing not valid for %s - updating\", target)\n pricing.update_pricing(cascade=False)\n pricing.refresh_from_db()\n\n base_currency = common.settings.currency_code_default()\n\n # Keep track of total quantity and cost for this part\n total_quantity = 0\n total_cost_min = Money(0, base_currency)\n total_cost_max = Money(0, base_currency)\n\n # Separately, keep track of stock quantity and value within the specified location\n location_item_count = 0\n location_quantity = 0\n location_cost_min = Money(0, base_currency)\n location_cost_max = Money(0, base_currency)\n\n for entry in stock_entries:\n\n entry_cost_min = None\n entry_cost_max = None\n\n # Update price range values\n if entry.purchase_price:\n entry_cost_min = entry.purchase_price\n entry_cost_max = entry.purchase_price\n\n else:\n # If no purchase price is available, fall back to the part pricing data\n entry_cost_min = pricing.overall_min or pricing.overall_max\n entry_cost_max = pricing.overall_max or pricing.overall_min\n\n # Convert to base currency\n try:\n entry_cost_min = convert_money(entry_cost_min, base_currency) * entry.quantity\n entry_cost_max = convert_money(entry_cost_max, base_currency) * entry.quantity\n except Exception:\n\n entry_cost_min = Money(0, base_currency)\n entry_cost_max = Money(0, base_currency)\n\n # Update total cost values\n total_quantity += entry.quantity\n total_cost_min += entry_cost_min\n 
total_cost_max += entry_cost_max\n\n # Test if this stock item is within the specified location\n if location and entry.location not in locations:\n continue\n\n # Update location cost values\n location_item_count += 1\n location_quantity += entry.quantity\n location_cost_min += entry_cost_min\n location_cost_max += entry_cost_max\n\n # Construct PartStocktake instance\n # Note that we use the *total* values for the PartStocktake instance\n instance = part.models.PartStocktake(\n part=target,\n item_count=stock_entries.count(),\n quantity=total_quantity,\n cost_min=total_cost_min,\n cost_max=total_cost_max,\n note=note,\n user=user,\n )\n\n if commit:\n instance.save()\n\n # Add location-specific data to the instance\n instance.location_item_count = location_item_count\n instance.location_quantity = location_quantity\n instance.location_cost_min = location_cost_min\n instance.location_cost_max = location_cost_max\n\n return instance\n\n\ndef generate_stocktake_report(**kwargs):\n \"\"\"Generated a new stocktake report.\n\n Note that this method should be called only by the background worker process!\n\n Unless otherwise specified, the stocktake report is generated for *all* Part instances.\n Optional filters can by supplied via the kwargs\n\n kwargs:\n user: The user who requested this stocktake (set to None for automated stocktake)\n part: Optional Part instance to filter by (including variant parts)\n category: Optional PartCategory to filter results\n location: Optional StockLocation to filter results\n exclude_external: If True, exclude stock items in external locations (default = False)\n generate_report: If True, generate a stocktake report from the calculated data (default=True)\n update_parts: If True, save stocktake information against each filtered Part (default = True)\n \"\"\"\n\n # Determine if external locations should be excluded\n exclude_external = kwargs.get(\n 'exclude_exernal',\n common.models.InvenTreeSetting.get_setting('STOCKTAKE_EXCLUDE_EXTERNAL', False)\n )\n\n parts = part.models.Part.objects.all()\n user = kwargs.get('user', None)\n\n generate_report = kwargs.get('generate_report', True)\n update_parts = kwargs.get('update_parts', True)\n\n # Filter by 'Part' instance\n if p := kwargs.get('part', None):\n variants = p.get_descendants(include_self=True)\n parts = parts.filter(\n pk__in=[v.pk for v in variants]\n )\n\n # Filter by 'Category' instance (cascading)\n if category := kwargs.get('category', None):\n categories = category.get_descendants(include_self=True)\n parts = parts.filter(category__in=categories)\n\n # Filter by 'Location' instance (cascading)\n # Stocktake report will be limited to parts which have stock items within this location\n if location := kwargs.get('location', None):\n # Extract flat list of all sublocations\n locations = list(location.get_descendants(include_self=True))\n\n # Items which exist within these locations\n items = stock.models.StockItem.objects.filter(location__in=locations)\n\n if exclude_external:\n items = items.exclude(location__external=True)\n\n # List of parts which exist within these locations\n unique_parts = items.order_by().values('part').distinct()\n\n parts = parts.filter(\n pk__in=[result['part'] for result in unique_parts]\n )\n\n # Exit if filters removed all parts\n n_parts = parts.count()\n\n if n_parts == 0:\n logger.info(\"No parts selected for stocktake report - exiting\")\n return\n\n logger.info(\"Generating new stocktake report for %s parts\", n_parts)\n\n base_currency = 
common.settings.currency_code_default()\n\n # Construct an initial dataset for the stocktake report\n dataset = tablib.Dataset(\n headers=[\n _('Part ID'),\n _('Part Name'),\n _('Part Description'),\n _('Category ID'),\n _('Category Name'),\n _('Stock Items'),\n _('Total Quantity'),\n _('Total Cost Min') + f' ({base_currency})',\n _('Total Cost Max') + f' ({base_currency})',\n ]\n )\n\n parts = parts.prefetch_related('category', 'stock_items')\n\n # Simple profiling for this task\n t_start = time.time()\n\n # Keep track of each individual \"stocktake\" we perform.\n # They may be bulk-commited to the database afterwards\n stocktake_instances = []\n\n total_parts = 0\n\n # Iterate through each Part which matches the filters above\n for p in parts:\n\n # Create a new stocktake for this part (do not commit, this will take place later on)\n stocktake = perform_stocktake(\n p, user, commit=False,\n exclude_external=exclude_external,\n location=location,\n )\n\n total_parts += 1\n\n stocktake_instances.append(stocktake)\n\n # Add a row to the dataset\n dataset.append([\n p.pk,\n p.full_name,\n p.description,\n p.category.pk if p.category else '',\n p.category.name if p.category else '',\n stocktake.location_item_count,\n stocktake.location_quantity,\n InvenTree.helpers.normalize(stocktake.location_cost_min.amount),\n InvenTree.helpers.normalize(stocktake.location_cost_max.amount),\n ])\n\n # Save a new PartStocktakeReport instance\n buffer = io.StringIO()\n buffer.write(dataset.export('csv'))\n\n today = datetime.now().date().isoformat()\n filename = f\"InvenTree_Stocktake_{today}.csv\"\n report_file = ContentFile(buffer.getvalue(), name=filename)\n\n if generate_report:\n report_instance = part.models.PartStocktakeReport.objects.create(\n report=report_file,\n part_count=total_parts,\n user=user\n )\n\n # Notify the requesting user\n if user:\n\n common.notifications.trigger_notification(\n report_instance,\n category='generate_stocktake_report',\n context={\n 'name': _('Stocktake Report Available'),\n 'message': _('A new stocktake report is available for download'),\n },\n targets=[\n user,\n ]\n )\n\n # If 'update_parts' is set, we save stocktake entries for each individual part\n if update_parts:\n # Use bulk_create for efficient insertion of stocktake\n part.models.PartStocktake.objects.bulk_create(\n stocktake_instances,\n batch_size=500,\n )\n\n t_stocktake = time.time() - t_start\n logger.info(\"Generated stocktake report for %s parts in %ss\", total_parts, round(t_stocktake, 2))\n",
"path": "InvenTree/part/stocktake.py"
}
] | diff --git a/InvenTree/part/stocktake.py b/InvenTree/part/stocktake.py
index aae3b94f17be..f803248c88fa 100644
--- a/InvenTree/part/stocktake.py
+++ b/InvenTree/part/stocktake.py
@@ -249,10 +249,6 @@ def generate_stocktake_report(**kwargs):
location=location,
)
- if stocktake.quantity == 0:
- # Skip rows with zero total quantity
- continue
-
total_parts += 1
stocktake_instances.append(stocktake)
diff --git a/InvenTree/part/test_api.py b/InvenTree/part/test_api.py
index 8d44ab0e9327..57af3632cbc0 100644
--- a/InvenTree/part/test_api.py
+++ b/InvenTree/part/test_api.py
@@ -3015,7 +3015,7 @@ def test_report_list(self):
data = response.data[0]
- self.assertEqual(data['part_count'], 8)
+ self.assertEqual(data['part_count'], 14)
self.assertEqual(data['user'], None)
self.assertTrue(data['report'].endswith('.csv'))
|
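Note on the diff above: the removed `continue` dropped zero-quantity parts from the report entirely (CSV rows, saved `PartStocktake` instances, and the `part_count` tally), which is why the test expectation moves from 8 to 14 once every filtered part is kept. A minimal, self-contained sketch of the `tablib` pattern the report builder relies on (the part IDs and quantities are made up for illustration):

```python
# Minimal sketch of the tablib usage in generate_stocktake_report();
# values here are illustrative only.
import tablib

dataset = tablib.Dataset(headers=["Part ID", "Total Quantity"])
dataset.append([1, 0])    # zero-quantity row: previously skipped, now kept
dataset.append([2, 25])

print(dataset.export("csv"))
```

With the skip removed, every part that passes the filters contributes exactly one row, so `part_count` and the number of CSV rows always agree.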
lutris__lutris-2885 | Runners list is not updated on a second install/uninstall
**Describe the bug**
The second time you try to install a runner during a Lutris session, the runner list is no longer updated.
**Current behavior**
Per session, the runner list reacts only to the first install and the first uninstall of a runner; later changes are ignored.
**Steps to reproduce**
1. For the sake of this test, make sure you don't have the ZDoom runner installed.
2. Open Lutris.
3. Open the 'Manage runners' window.
4. Install ZDoom runner.
4.1. As expected, the ZDoom entry is added to the runner list in the main Lutris window.
5. Uninstall ZDoom runner.
5.1. As expected, the ZDoom entry is removed from the runner list in the main Lutris window.
6. Reinstall ZDoom runner.
6.1. The ZDoom entry is *not* added back to the runner list in the main Lutris window!
| [
{
"content": "\"\"\"Sidebar for the main window\"\"\"\n# Standard Library\nimport os\n\n# Third Party Libraries\nfrom gi.repository import GObject, Gtk, Pango\n\n# Lutris Modules\nfrom lutris import pga, platforms, runners\nfrom lutris.game import Game\nfrom lutris.gui.config.runner import RunnerConfigDialog\nfrom lutris.gui.dialogs.runner_install import RunnerInstallDialog\nfrom lutris.gui.dialogs.runners import RunnersDialog\nfrom lutris.util import datapath\n\nTYPE = 0\nSLUG = 1\nICON = 2\nLABEL = 3\nGAMECOUNT = 4\n\n\nclass SidebarRow(Gtk.ListBoxRow):\n\n def __init__(self, id_, type_, name, icon):\n super().__init__()\n self.type = type_\n self.id = id_\n self.btn_box = None\n self.runner = None\n\n self.box = Gtk.Box(spacing=6, margin_start=9, margin_end=9)\n\n # Construct the left column icon space.\n if icon:\n self.box.add(icon)\n else:\n # Place a spacer if there is no loaded icon.\n icon = Gtk.Box(spacing=6, margin_start=9, margin_end=9)\n self.box.add(icon)\n\n label = Gtk.Label(\n label=name,\n halign=Gtk.Align.START,\n hexpand=True,\n margin_top=6,\n margin_bottom=6,\n ellipsize=Pango.EllipsizeMode.END,\n )\n self.box.add(label)\n\n self.add(self.box)\n\n def _create_button_box(self):\n self.btn_box = Gtk.Box(spacing=3, no_show_all=True, valign=Gtk.Align.CENTER, homogeneous=True)\n\n # Creation is delayed because only installed runners can be imported\n # and all visible boxes should be installed.\n self.runner = runners.import_runner(self.id)()\n entries = []\n if self.runner.multiple_versions:\n entries.append((\n \"system-software-install-symbolic\",\n \"Manage Versions\",\n self.on_manage_versions,\n ))\n if self.runner.runnable_alone:\n entries.append((\"media-playback-start-symbolic\", \"Run\", self.runner.run))\n entries.append((\"emblem-system-symbolic\", \"Configure\", self.on_configure_runner))\n for entry in entries:\n btn = Gtk.Button(tooltip_text=entry[1], relief=Gtk.ReliefStyle.NONE, visible=True)\n image = Gtk.Image.new_from_icon_name(entry[0], Gtk.IconSize.MENU)\n image.show()\n btn.add(image)\n btn.connect(\"clicked\", entry[2])\n self.btn_box.add(btn)\n\n self.box.add(self.btn_box)\n\n def on_configure_runner(self, *args): # pylint: disable=unused-argument\n RunnerConfigDialog(self.runner, parent=self.get_toplevel())\n\n def on_manage_versions(self, *args): # pylint: disable=unused-argument\n dlg_title = \"Manage %s versions\" % self.runner.name\n RunnerInstallDialog(dlg_title, self.get_toplevel(), self.runner.name)\n\n def do_state_flags_changed(self, previous_flags): # pylint: disable=arguments-differ\n if self.id is not None and self.type == \"runner\":\n flags = self.get_state_flags()\n if flags & Gtk.StateFlags.PRELIGHT or flags & Gtk.StateFlags.SELECTED:\n if self.btn_box is None:\n self._create_button_box()\n self.btn_box.show()\n elif self.btn_box is not None and self.btn_box.get_visible():\n self.btn_box.hide()\n Gtk.ListBoxRow.do_state_flags_changed(self, previous_flags)\n\n\nclass SidebarHeader(Gtk.Box):\n\n def __init__(self, name):\n super().__init__(orientation=Gtk.Orientation.VERTICAL)\n self.get_style_context().add_class(\"sidebar-header\")\n label = Gtk.Label(\n halign=Gtk.Align.START,\n hexpand=True,\n use_markup=True,\n label=\"<b>{}</b>\".format(name),\n )\n label.get_style_context().add_class(\"dim-label\")\n box = Gtk.Box(margin_start=9, margin_top=6, margin_bottom=6, margin_right=9)\n box.add(label)\n self.add(box)\n if name == \"Runners\":\n manage_runners_button = Gtk.Button.new_from_icon_name(\"emblem-system-symbolic\", 
Gtk.IconSize.MENU)\n manage_runners_button.props.action_name = \"win.manage-runners\"\n manage_runners_button.props.relief = Gtk.ReliefStyle.NONE\n manage_runners_button.set_margin_right(16)\n manage_runners_button.get_style_context().add_class(\"sidebar-button\")\n box.add(manage_runners_button)\n self.add(Gtk.Separator())\n self.show_all()\n\n\nclass SidebarListBox(Gtk.ListBox):\n __gtype_name__ = \"LutrisSidebar\"\n\n def __init__(self):\n super().__init__()\n self.get_style_context().add_class(\"sidebar\")\n self.installed_runners = []\n self.active_platforms = pga.get_used_platforms()\n self.runners = sorted(runners.__all__)\n self.platforms = sorted(platforms.__all__)\n\n GObject.add_emission_hook(RunnersDialog, \"runner-installed\", self.update)\n GObject.add_emission_hook(RunnersDialog, \"runner-removed\", self.update)\n GObject.add_emission_hook(Game, \"game-updated\", self.update)\n GObject.add_emission_hook(Game, \"game-removed\", self.update)\n\n # TODO: This should be in a more logical location\n icon_theme = Gtk.IconTheme.get_default()\n local_theme_path = os.path.join(datapath.get(), \"icons\")\n if local_theme_path not in icon_theme.get_search_path():\n icon_theme.prepend_search_path(local_theme_path)\n\n all_row = SidebarRow(None, \"runner\", \"All\", None)\n self.add(all_row)\n self.select_row(all_row)\n for runner in self.runners:\n icon_name = runner.lower().replace(\" \", \"\") + \"-symbolic\"\n icon = Gtk.Image.new_from_icon_name(icon_name, Gtk.IconSize.MENU)\n name = runners.import_runner(runner).human_name\n self.add(SidebarRow(runner, \"runner\", name, icon))\n\n self.add(SidebarRow(None, \"platform\", \"All\", None))\n for platform in self.platforms:\n icon_name = (platform.lower().replace(\" \", \"\").replace(\"/\", \"_\") + \"-symbolic\")\n icon = Gtk.Image.new_from_icon_name(icon_name, Gtk.IconSize.MENU)\n self.add(SidebarRow(platform, \"platform\", platform, icon))\n\n self.set_filter_func(self._filter_func)\n self.set_header_func(self._header_func)\n self.update()\n self.show_all()\n\n def _filter_func(self, row):\n if row is None:\n return True\n if row.type == \"runner\":\n if row.id is None:\n return True # 'All'\n return row.id in self.installed_runners\n if len(self.active_platforms) <= 1:\n return False # Hide useless filter\n if row.id is None: # 'All'\n return True\n return row.id in self.active_platforms\n\n def _header_func(self, row, before):\n if row.get_header():\n return\n\n if not before:\n row.set_header(SidebarHeader(\"Runners\"))\n elif before.type == \"runner\" and row.type == \"platform\":\n row.set_header(SidebarHeader(\"Platforms\"))\n\n def update(self, *args): # pylint: disable=unused-argument\n self.installed_runners = [runner.name for runner in runners.get_installed()]\n self.active_platforms = pga.get_used_platforms()\n self.invalidate_filter()\n",
"path": "lutris/gui/widgets/sidebar.py"
}
] | [
{
"content": "\"\"\"Sidebar for the main window\"\"\"\n# Standard Library\nimport os\n\n# Third Party Libraries\nfrom gi.repository import GObject, Gtk, Pango\n\n# Lutris Modules\nfrom lutris import pga, platforms, runners\nfrom lutris.game import Game\nfrom lutris.gui.config.runner import RunnerConfigDialog\nfrom lutris.gui.dialogs.runner_install import RunnerInstallDialog\nfrom lutris.gui.dialogs.runners import RunnersDialog\nfrom lutris.util import datapath\n\nTYPE = 0\nSLUG = 1\nICON = 2\nLABEL = 3\nGAMECOUNT = 4\n\n\nclass SidebarRow(Gtk.ListBoxRow):\n\n def __init__(self, id_, type_, name, icon):\n super().__init__()\n self.type = type_\n self.id = id_\n self.btn_box = None\n self.runner = None\n\n self.box = Gtk.Box(spacing=6, margin_start=9, margin_end=9)\n\n # Construct the left column icon space.\n if icon:\n self.box.add(icon)\n else:\n # Place a spacer if there is no loaded icon.\n icon = Gtk.Box(spacing=6, margin_start=9, margin_end=9)\n self.box.add(icon)\n\n label = Gtk.Label(\n label=name,\n halign=Gtk.Align.START,\n hexpand=True,\n margin_top=6,\n margin_bottom=6,\n ellipsize=Pango.EllipsizeMode.END,\n )\n self.box.add(label)\n\n self.add(self.box)\n\n def _create_button_box(self):\n self.btn_box = Gtk.Box(spacing=3, no_show_all=True, valign=Gtk.Align.CENTER, homogeneous=True)\n\n # Creation is delayed because only installed runners can be imported\n # and all visible boxes should be installed.\n self.runner = runners.import_runner(self.id)()\n entries = []\n if self.runner.multiple_versions:\n entries.append((\n \"system-software-install-symbolic\",\n \"Manage Versions\",\n self.on_manage_versions,\n ))\n if self.runner.runnable_alone:\n entries.append((\"media-playback-start-symbolic\", \"Run\", self.runner.run))\n entries.append((\"emblem-system-symbolic\", \"Configure\", self.on_configure_runner))\n for entry in entries:\n btn = Gtk.Button(tooltip_text=entry[1], relief=Gtk.ReliefStyle.NONE, visible=True)\n image = Gtk.Image.new_from_icon_name(entry[0], Gtk.IconSize.MENU)\n image.show()\n btn.add(image)\n btn.connect(\"clicked\", entry[2])\n self.btn_box.add(btn)\n\n self.box.add(self.btn_box)\n\n def on_configure_runner(self, *args): # pylint: disable=unused-argument\n RunnerConfigDialog(self.runner, parent=self.get_toplevel())\n\n def on_manage_versions(self, *args): # pylint: disable=unused-argument\n dlg_title = \"Manage %s versions\" % self.runner.name\n RunnerInstallDialog(dlg_title, self.get_toplevel(), self.runner.name)\n\n def do_state_flags_changed(self, previous_flags): # pylint: disable=arguments-differ\n if self.id is not None and self.type == \"runner\":\n flags = self.get_state_flags()\n if flags & Gtk.StateFlags.PRELIGHT or flags & Gtk.StateFlags.SELECTED:\n if self.btn_box is None:\n self._create_button_box()\n self.btn_box.show()\n elif self.btn_box is not None and self.btn_box.get_visible():\n self.btn_box.hide()\n Gtk.ListBoxRow.do_state_flags_changed(self, previous_flags)\n\n\nclass SidebarHeader(Gtk.Box):\n\n def __init__(self, name):\n super().__init__(orientation=Gtk.Orientation.VERTICAL)\n self.get_style_context().add_class(\"sidebar-header\")\n label = Gtk.Label(\n halign=Gtk.Align.START,\n hexpand=True,\n use_markup=True,\n label=\"<b>{}</b>\".format(name),\n )\n label.get_style_context().add_class(\"dim-label\")\n box = Gtk.Box(margin_start=9, margin_top=6, margin_bottom=6, margin_right=9)\n box.add(label)\n self.add(box)\n if name == \"Runners\":\n manage_runners_button = Gtk.Button.new_from_icon_name(\"emblem-system-symbolic\", 
Gtk.IconSize.MENU)\n manage_runners_button.props.action_name = \"win.manage-runners\"\n manage_runners_button.props.relief = Gtk.ReliefStyle.NONE\n manage_runners_button.set_margin_right(16)\n manage_runners_button.get_style_context().add_class(\"sidebar-button\")\n box.add(manage_runners_button)\n self.add(Gtk.Separator())\n self.show_all()\n\n\nclass SidebarListBox(Gtk.ListBox):\n __gtype_name__ = \"LutrisSidebar\"\n\n def __init__(self):\n super().__init__()\n self.get_style_context().add_class(\"sidebar\")\n self.installed_runners = []\n self.active_platforms = pga.get_used_platforms()\n self.runners = sorted(runners.__all__)\n self.platforms = sorted(platforms.__all__)\n\n GObject.add_emission_hook(RunnersDialog, \"runner-installed\", self.update)\n GObject.add_emission_hook(RunnersDialog, \"runner-removed\", self.update)\n GObject.add_emission_hook(Game, \"game-updated\", self.update)\n GObject.add_emission_hook(Game, \"game-removed\", self.update)\n\n # TODO: This should be in a more logical location\n icon_theme = Gtk.IconTheme.get_default()\n local_theme_path = os.path.join(datapath.get(), \"icons\")\n if local_theme_path not in icon_theme.get_search_path():\n icon_theme.prepend_search_path(local_theme_path)\n\n all_row = SidebarRow(None, \"runner\", \"All\", None)\n self.add(all_row)\n self.select_row(all_row)\n for runner in self.runners:\n icon_name = runner.lower().replace(\" \", \"\") + \"-symbolic\"\n icon = Gtk.Image.new_from_icon_name(icon_name, Gtk.IconSize.MENU)\n name = runners.import_runner(runner).human_name\n self.add(SidebarRow(runner, \"runner\", name, icon))\n\n self.add(SidebarRow(None, \"platform\", \"All\", None))\n for platform in self.platforms:\n icon_name = (platform.lower().replace(\" \", \"\").replace(\"/\", \"_\") + \"-symbolic\")\n icon = Gtk.Image.new_from_icon_name(icon_name, Gtk.IconSize.MENU)\n self.add(SidebarRow(platform, \"platform\", platform, icon))\n\n self.set_filter_func(self._filter_func)\n self.set_header_func(self._header_func)\n self.update()\n self.show_all()\n\n def _filter_func(self, row):\n if row is None:\n return True\n if row.type == \"runner\":\n if row.id is None:\n return True # 'All'\n return row.id in self.installed_runners\n if len(self.active_platforms) <= 1:\n return False # Hide useless filter\n if row.id is None: # 'All'\n return True\n return row.id in self.active_platforms\n\n def _header_func(self, row, before):\n if row.get_header():\n return\n\n if not before:\n row.set_header(SidebarHeader(\"Runners\"))\n elif before.type == \"runner\" and row.type == \"platform\":\n row.set_header(SidebarHeader(\"Platforms\"))\n\n def update(self, *args): # pylint: disable=unused-argument\n self.installed_runners = [runner.name for runner in runners.get_installed()]\n self.active_platforms = pga.get_used_platforms()\n self.invalidate_filter()\n return True\n",
"path": "lutris/gui/widgets/sidebar.py"
}
] | diff --git a/lutris/gui/widgets/sidebar.py b/lutris/gui/widgets/sidebar.py
index 35d5488903..e898f57200 100644
--- a/lutris/gui/widgets/sidebar.py
+++ b/lutris/gui/widgets/sidebar.py
@@ -190,3 +190,4 @@ def update(self, *args): # pylint: disable=unused-argument
self.installed_runners = [runner.name for runner in runners.get_installed()]
self.active_platforms = pga.get_used_platforms()
self.invalidate_filter()
+ return True
|
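The one-line fix above hinges on a PyGObject/GLib rule: an emission hook whose callback returns a falsy value is removed after that emission, so `update()` implicitly returning `None` meant each hook fired exactly once. A self-contained sketch of that behavior (the `Emitter` class and signal name are illustrative, not Lutris code):

```python
# Demonstrates the emission-hook lifetime rule behind the fix above.
from gi.repository import GObject

class Emitter(GObject.Object):
    __gsignals__ = {"runner-installed": (GObject.SignalFlags.RUN_FIRST, None, ())}

def on_installed(emitter, *args):
    print("sidebar updated")
    return True  # keep the hook installed; returning None/False detaches it

GObject.add_emission_hook(Emitter, "runner-installed", on_installed)

e = Emitter()
e.emit("runner-installed")  # hook fires
e.emit("runner-installed")  # fires again only because on_installed returned True
```

Returning `True` keeps the hook attached for every subsequent emission, which is all the sidebar's `update()` needs.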
googleapis__google-cloud-python-6332 | Release 'api_core-1.6.0a2'? or 'api_core-1.5.1'?
I'd like to use the changes from https://github.com/googleapis/google-cloud-python/pull/6310 in the library I'm working on.
Not sure about the version number for that one, since:
- I don't know what else has been released since 1.6.0a1
- I don't know what is intended by 1.6.0a1 in https://github.com/googleapis/google-cloud-python/pull/6267 (is it a pre-release? See the note just below.)
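Note on that last question: under PEP 440, the `a1` suffix marks an alpha pre-release, and pip skips pre-releases during resolution unless `--pre` is passed or the version is pinned explicitly. A quick check, assuming the `packaging` library is installed:

```python
# Confirms the PEP 440 semantics of the versions discussed above.
from packaging.version import Version

v = Version("1.6.0a1")
print(v.is_prerelease)       # True: the `a1` suffix marks an alpha pre-release
print(Version("1.5.1") > v)  # False: 1.5.1 sorts before 1.6.0a1
```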
| [
{
"content": "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport io\nimport os\n\nimport setuptools\n\n\n# Package metadata.\n\nname = 'google-api-core'\ndescription = 'Google API client core library'\nversion = '1.6.0a1'\n# Should be one of:\n# 'Development Status :: 3 - Alpha'\n# 'Development Status :: 4 - Beta'\n# 'Development Status :: 5 - Production/Stable'\nrelease_status = 'Development Status :: 5 - Production/Stable'\ndependencies = [\n 'googleapis-common-protos<2.0dev,>=1.5.3',\n 'protobuf>=3.4.0',\n 'google-auth<2.0.0dev,>=0.4.0',\n 'requests<3.0.0dev,>=2.18.0',\n 'setuptools>=34.0.0',\n 'six>=1.10.0',\n 'pytz',\n 'futures>=3.2.0;python_version<\"3.2\"'\n]\nextras = {\n 'grpc': 'grpcio>=1.8.2',\n 'grpcio-gcp': 'grpcio-gcp>=0.2.2'\n}\n\n\n# Setup boilerplate below this line.\n\npackage_root = os.path.abspath(os.path.dirname(__file__))\n\nreadme_filename = os.path.join(package_root, 'README.rst')\nwith io.open(readme_filename, encoding='utf-8') as readme_file:\n readme = readme_file.read()\n\n# Only include packages under the 'google' namespace. Do not include tests,\n# benchmarks, etc.\npackages = [\n package for package in setuptools.find_packages()\n if package.startswith('google')]\n\n# Determine which namespaces are needed.\nnamespaces = ['google']\nif 'google.cloud' in packages:\n namespaces.append('google.cloud')\n\n\nsetuptools.setup(\n name=name,\n version=version,\n description=description,\n long_description=readme,\n author='Google LLC',\n author_email='[email protected]',\n license='Apache 2.0',\n url='https://github.com/GoogleCloudPlatform/google-cloud-python',\n classifiers=[\n release_status,\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Operating System :: OS Independent',\n 'Topic :: Internet',\n ],\n platforms='Posix; MacOS X; Windows',\n packages=packages,\n namespace_packages=namespaces,\n install_requires=dependencies,\n extras_require=extras,\n include_package_data=True,\n zip_safe=False,\n)\n",
"path": "api_core/setup.py"
}
] | [
{
"content": "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport io\nimport os\n\nimport setuptools\n\n\n# Package metadata.\n\nname = 'google-api-core'\ndescription = 'Google API client core library'\nversion = '1.5.1'\n# Should be one of:\n# 'Development Status :: 3 - Alpha'\n# 'Development Status :: 4 - Beta'\n# 'Development Status :: 5 - Production/Stable'\nrelease_status = 'Development Status :: 5 - Production/Stable'\ndependencies = [\n 'googleapis-common-protos<2.0dev,>=1.5.3',\n 'protobuf>=3.4.0',\n 'google-auth<2.0.0dev,>=0.4.0',\n 'requests<3.0.0dev,>=2.18.0',\n 'setuptools>=34.0.0',\n 'six>=1.10.0',\n 'pytz',\n 'futures>=3.2.0;python_version<\"3.2\"'\n]\nextras = {\n 'grpc': 'grpcio>=1.8.2',\n 'grpcio-gcp': 'grpcio-gcp>=0.2.2'\n}\n\n\n# Setup boilerplate below this line.\n\npackage_root = os.path.abspath(os.path.dirname(__file__))\n\nreadme_filename = os.path.join(package_root, 'README.rst')\nwith io.open(readme_filename, encoding='utf-8') as readme_file:\n readme = readme_file.read()\n\n# Only include packages under the 'google' namespace. Do not include tests,\n# benchmarks, etc.\npackages = [\n package for package in setuptools.find_packages()\n if package.startswith('google')]\n\n# Determine which namespaces are needed.\nnamespaces = ['google']\nif 'google.cloud' in packages:\n namespaces.append('google.cloud')\n\n\nsetuptools.setup(\n name=name,\n version=version,\n description=description,\n long_description=readme,\n author='Google LLC',\n author_email='[email protected]',\n license='Apache 2.0',\n url='https://github.com/GoogleCloudPlatform/google-cloud-python',\n classifiers=[\n release_status,\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Operating System :: OS Independent',\n 'Topic :: Internet',\n ],\n platforms='Posix; MacOS X; Windows',\n packages=packages,\n namespace_packages=namespaces,\n install_requires=dependencies,\n extras_require=extras,\n include_package_data=True,\n zip_safe=False,\n)\n",
"path": "api_core/setup.py"
}
] | diff --git a/api_core/CHANGELOG.md b/api_core/CHANGELOG.md
index 0d601ee6b85d..890d88e043d2 100644
--- a/api_core/CHANGELOG.md
+++ b/api_core/CHANGELOG.md
@@ -4,24 +4,21 @@
[1]: https://pypi.org/project/google-api-core/#history
-## 1.6.0a1
+## 1.5.1
-10-18-2018 16:26 PDT
+10-29-2018 13:29 PDT
-### New Features
-- Add methods to api_core used by new autogenerator. ([#6267](https://github.com/googleapis/google-cloud-python/pull/6267))
+### Implementation Changes
+- Don't URL-encode slashes in gRPC request headers. ([#6310](https://github.com/googleapis/google-cloud-python/pull/6310))
### Internal / Testing Changes
-- Fix branch coverage for un-called callbacks. ([#6242](https://github.com/googleapis/google-cloud-python/pull/6242))
-- Fix import order, appeasing lint. ([#6240](https://github.com/googleapis/google-cloud-python/pull/6240))
-- Add / fix badges for PyPI / versions. ([#6158](https://github.com/googleapis/google-cloud-python/pull/6158))
+- Back out changes from [#6267](https://github.com/googleapis/google-cloud-python/pull/6267) / `api_core-1.6.0a1` release. ([#6328](https://github.com/googleapis/google-cloud-python/pull/6328))
## 1.5.0
### New Features
- Add bidi, Bidirection Streaming, to api-core ([#6211](https://github.com/GoogleCloudPlatform/google-cloud-python/pull/6211))
-
### Internal / Testing Changes
- Use new Nox ([#6175](https://github.com/GoogleCloudPlatform/google-cloud-python/pull/6175))
diff --git a/api_core/setup.py b/api_core/setup.py
index 82668b39ead5..fdff7c34d92a 100644
--- a/api_core/setup.py
+++ b/api_core/setup.py
@@ -22,7 +22,7 @@
name = 'google-api-core'
description = 'Google API client core library'
-version = '1.6.0a1'
+version = '1.5.1'
# Should be one of:
# 'Development Status :: 3 - Alpha'
# 'Development Status :: 4 - Beta'
|
cisagov__manage.get.gov-610 | Document our data models
Per our last retrospective, we decided something that would help the growing complexity of our data modeling would be an updated diagram of the data model flows on the backend.
Outcome: A diagram (preferably PlantUML?) saved to our docs folder under architecture.
Here is an old version of this: https://raw.githubusercontent.com/cisagov/getgov/fcf9652e8f1c0e34b221dbfb4eb28767fcfab41e/docs/architecture/diagrams/models.svg
and a conversation around it: https://gsa-tts.slack.com/archives/C03QM0JGSQG/p1661970335955509
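The resolution in this record enables a (commented-out) `puml_generator` app with a `manage.py generate_puml` command; as a hedged, standalone illustration of the same idea, here is a sketch that derives a bare-bones PlantUML class diagram from Django model metadata (it assumes a configured Django project, e.g. run from `manage.py shell`, and the helper name is made up):

```python
# Hedged sketch: emit a minimal PlantUML class diagram from Django model
# metadata. The real workflow in this record uses `manage.py generate_puml`.
from django.apps import apps

def models_to_puml(app_label: str) -> str:
    lines = ["@startuml"]
    for model in apps.get_app_config(app_label).get_models():
        lines.append(f"class {model.__name__} {{")
        for field in model._meta.get_fields():
            lines.append(f"  {field.name}")  # includes reverse relations
        lines.append("}")
    lines.append("@enduml")
    return "\n".join(lines)

# e.g. print(models_to_puml("registrar")) from a Django shell
```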
| [
{
"content": "\"\"\"\nDjango settings for .gov registrar project.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/4.0/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/4.0/ref/settings/\n\nIF you'd like to see all of these settings in the running app:\n\n```shell\n$ docker-compose exec app python manage.py shell\n>>> from django.conf import settings\n>>> dir(settings)\n```\n\n\"\"\"\nimport environs\nfrom base64 import b64decode\nfrom cfenv import AppEnv # type: ignore\nfrom pathlib import Path\nfrom typing import Final\n\nfrom botocore.config import Config\n\n# # # ###\n# Setup code goes here #\n# # # ###\n\nenv = environs.Env()\n\n# Get secrets from Cloud.gov user provided service, if exists\n# If not, get secrets from environment variables\nkey_service = AppEnv().get_service(name=\"getgov-credentials\")\nif key_service and key_service.credentials:\n secret = key_service.credentials.get\nelse:\n secret = env\n\n# # # ###\n# Values obtained externally #\n# # # ###\n\npath = Path(__file__)\n\nenv_db_url = env.dj_db_url(\"DATABASE_URL\")\nenv_debug = env.bool(\"DJANGO_DEBUG\", default=False)\nenv_log_level = env.str(\"DJANGO_LOG_LEVEL\", \"DEBUG\")\nenv_base_url = env.str(\"DJANGO_BASE_URL\")\n\nsecret_login_key = b64decode(secret(\"DJANGO_SECRET_LOGIN_KEY\", \"\"))\nsecret_key = secret(\"DJANGO_SECRET_KEY\")\n\nsecret_aws_ses_key_id = secret(\"AWS_ACCESS_KEY_ID\", None)\nsecret_aws_ses_key = secret(\"AWS_SECRET_ACCESS_KEY\", None)\n\nsecret_registry_cl_id = secret(\"REGISTRY_CL_ID\")\nsecret_registry_password = secret(\"REGISTRY_PASSWORD\")\nsecret_registry_cert = b64decode(secret(\"REGISTRY_CERT\", \"\"))\nsecret_registry_key = b64decode(secret(\"REGISTRY_KEY\", \"\"))\nsecret_registry_key_passphrase = secret(\"REGISTRY_KEY_PASSPHRASE\", \"\")\nsecret_registry_hostname = secret(\"REGISTRY_HOSTNAME\")\n\n# region: Basic Django Config-----------------------------------------------###\n\n# Build paths inside the project like this: BASE_DIR / \"subdir\".\n# (settings.py is in `src/registrar/config/`: BASE_DIR is `src/`)\nBASE_DIR = path.resolve().parent.parent.parent\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = env_debug\n\n\n# Applications are modular pieces of code.\n# They are provided by Django, by third-parties, or by yourself.\n# Installing them here makes them available for execution.\n# Do not access INSTALLED_APPS directly. Use `django.apps.apps` instead.\nINSTALLED_APPS = [\n # Django automatic admin interface reads metadata\n # from database models to provide a quick, model-centric\n # interface where trusted users can manage content\n \"django.contrib.admin\",\n # vv Required by django.contrib.admin vv\n # the \"user\" model! 
*\\o/*\n \"django.contrib.auth\",\n # generic interface for Django models\n \"django.contrib.contenttypes\",\n # required for CSRF protection and many other things\n \"django.contrib.sessions\",\n # framework for displaying messages to the user\n \"django.contrib.messages\",\n # ^^ Required by django.contrib.admin ^^\n # collects static files from each of your applications\n # (and any other places you specify) into a single location\n # that can easily be served in production\n \"django.contrib.staticfiles\",\n # application used for integrating with Login.gov\n \"djangooidc\",\n # audit logging of changes to models\n \"auditlog\",\n # library to simplify form templating\n \"widget_tweaks\",\n # library for Finite State Machine statuses\n \"django_fsm\",\n # library for phone numbers\n \"phonenumber_field\",\n # let's be sure to install our own application!\n \"registrar\",\n # Our internal API application\n \"api\",\n]\n\n# Middleware are routines for processing web requests.\n# Adding them here turns them \"on\"; Django will perform the\n# specified routines on each incoming request and outgoing response.\nMIDDLEWARE = [\n # django-allow-cidr: enable use of CIDR IP ranges in ALLOWED_HOSTS\n \"allow_cidr.middleware.AllowCIDRMiddleware\",\n # serve static assets in production\n \"whitenoise.middleware.WhiteNoiseMiddleware\",\n # provide security enhancements to the request/response cycle\n \"django.middleware.security.SecurityMiddleware\",\n # store and retrieve arbitrary data on a per-site-visitor basis\n \"django.contrib.sessions.middleware.SessionMiddleware\",\n # add a few conveniences for perfectionists, see documentation\n \"django.middleware.common.CommonMiddleware\",\n # add protection against Cross Site Request Forgeries by adding\n # hidden form fields to POST forms and checking requests for the correct value\n \"django.middleware.csrf.CsrfViewMiddleware\",\n # add `user` (the currently-logged-in user) to incoming HttpRequest objects\n \"django.contrib.auth.middleware.AuthenticationMiddleware\",\n # provide framework for displaying messages to the user, see documentation\n \"django.contrib.messages.middleware.MessageMiddleware\",\n # provide clickjacking protection via the X-Frame-Options header\n \"django.middleware.clickjacking.XFrameOptionsMiddleware\",\n # django-csp: enable use of Content-Security-Policy header\n \"csp.middleware.CSPMiddleware\",\n # django-auditlog: obtain the request User for use in logging\n \"auditlog.middleware.AuditlogMiddleware\",\n]\n\n# application object used by Django’s built-in servers (e.g. `runserver`)\nWSGI_APPLICATION = \"registrar.config.wsgi.application\"\n\n# endregion\n# region: Assets and HTML and Caching---------------------------------------###\n\n# https://docs.djangoproject.com/en/4.0/howto/static-files/\n\n\n# Caching is disabled by default.\n# For a low to medium traffic site, caching causes more\n# problems than it solves. 
Should caching be desired,\n# a reasonable start might be:\n# CACHES = {\n# \"default\": {\n# \"BACKEND\": \"django.core.cache.backends.db.DatabaseCache\",\n# }\n# }\n\n# Absolute path to the directory where `collectstatic`\n# will place static files for deployment.\n# Do not use this directory for permanent storage -\n# it is for Django!\nSTATIC_ROOT = BASE_DIR / \"registrar\" / \"public\"\n\nSTATICFILES_DIRS = [\n BASE_DIR / \"registrar\" / \"assets\",\n]\n\nTEMPLATES = [\n {\n \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n \"DIRS\": [BASE_DIR / \"registrar\" / \"templates\"],\n # look for templates inside installed apps\n # required by django-debug-toolbar\n \"APP_DIRS\": True,\n \"OPTIONS\": {\n # IMPORTANT security setting: escapes HTMLEntities,\n # helping to prevent XSS attacks\n \"autoescape\": True,\n # context processors are callables which return\n # dicts - Django merges them into the context\n # dictionary used to render the templates\n \"context_processors\": [\n \"django.template.context_processors.debug\",\n \"django.template.context_processors.request\",\n \"django.contrib.auth.context_processors.auth\",\n \"django.contrib.messages.context_processors.messages\",\n \"registrar.context_processors.language_code\",\n \"registrar.context_processors.canonical_path\",\n \"registrar.context_processors.is_demo_site\",\n ],\n },\n },\n]\n\n# Stop using table-based default form renderer which is deprecated\nFORM_RENDERER = \"django.forms.renderers.DjangoDivFormRenderer\"\n\nMESSAGE_STORAGE = \"django.contrib.messages.storage.session.SessionStorage\"\n\n# IS_DEMO_SITE controls whether or not we show our big red \"TEST SITE\" banner\n# underneath the \"this is a real government website\" banner.\nIS_DEMO_SITE = True\n\n# endregion\n# region: Database----------------------------------------------------------###\n\n# Wrap each view in a transaction on the database\n# A decorator can be used for views which have no database activity:\n# from django.db import transaction\n# @transaction.non_atomic_requests\nenv_db_url[\"ATOMIC_REQUESTS\"] = True\n\nDATABASES = {\n # dj-database-url package takes the supplied Postgres connection string\n # and converts it into a dictionary with the correct USER, HOST, etc\n \"default\": env_db_url,\n}\n\n# Specify default field type to use for primary keys\nDEFAULT_AUTO_FIELD = \"django.db.models.BigAutoField\"\n\n# Use our user model instead of the default\nAUTH_USER_MODEL = \"registrar.User\"\n\n# endregion\n# region: Email-------------------------------------------------------------###\n\n# Configuration for accessing AWS SES\nAWS_ACCESS_KEY_ID = secret_aws_ses_key_id\nAWS_SECRET_ACCESS_KEY = secret_aws_ses_key\nAWS_REGION = \"us-gov-west-1\"\n# https://boto3.amazonaws.com/v1/documentation/api/latest/guide/retries.html#standard-retry-mode\nAWS_RETRY_MODE: Final = \"standard\"\n# base 2 exponential backoff with max of 20 seconds:\nAWS_MAX_ATTEMPTS = 3\nBOTO_CONFIG = Config(retries={\"mode\": AWS_RETRY_MODE, \"max_attempts\": AWS_MAX_ATTEMPTS})\n\n# email address to use for various automated correspondence\n# TODO: pick something sensible here\nDEFAULT_FROM_EMAIL = \"[email protected]\"\n\n# connect to an (external) SMTP server for sending email\nEMAIL_BACKEND = \"django.core.mail.backends.smtp.EmailBackend\"\n\n# TODO: configure these when the values are known\n# EMAIL_HOST = \"\"\n# EMAIL_HOST_PASSWORD = \"\"\n# EMAIL_HOST_USER = \"\"\n# EMAIL_PORT = 587\n\n# for mail sent with mail_admins or mail_managers\nEMAIL_SUBJECT_PREFIX = 
\"[Attn: .gov admin] \"\n\n# use a TLS (secure) connection when talking to the SMTP server\n# TLS generally uses port 587\nEMAIL_USE_TLS = True\n\n# mutually exclusive with EMAIL_USE_TLS = True\n# SSL generally uses port 465\nEMAIL_USE_SSL = False\n\n# timeout in seconds for blocking operations, like the connection attempt\nEMAIL_TIMEOUT = 30\n\n# email address to use for sending error reports\nSERVER_EMAIL = \"[email protected]\"\n\n# endregion\n# region: Headers-----------------------------------------------------------###\n\n# Content-Security-Policy configuration\n# this can be restrictive because we have few external scripts\nallowed_sources = (\"'self'\",)\nCSP_DEFAULT_SRC = allowed_sources\n# Most things fall back to default-src, but these two do not and should be\n# explicitly set\nCSP_FRAME_ANCESTORS = allowed_sources\nCSP_FORM_ACTION = allowed_sources\n\n\n# Content-Length header is set by django.middleware.common.CommonMiddleware\n\n# X-Frame-Options header is set by\n# django.middleware.clickjacking.XFrameOptionsMiddleware\n# and configured in the Security and Privacy section of this file.\n# Strict-Transport-Security is set by django.middleware.security.SecurityMiddleware\n# and configured in the Security and Privacy section of this file.\n\n# prefer contents of X-Forwarded-Host header to Host header\n# as Host header may contain a proxy rather than the actual client\nUSE_X_FORWARDED_HOST = True\n\n# endregion\n# region: Internationalisation----------------------------------------------###\n\n# https://docs.djangoproject.com/en/4.0/topics/i18n/\n\n# Charset to use for HttpResponse objects; used in Content-Type header\nDEFAULT_CHARSET = \"utf-8\"\n\n# provide fallback language if translation file is missing or\n# user's locale is not supported - requires USE_I18N = True\nLANGUAGE_CODE = \"en-us\"\n\n# allows language cookie to be sent if the user\n# is coming to our site from an external page.\nLANGUAGE_COOKIE_SAMESITE = None\n\n# only send via HTTPS connection\nLANGUAGE_COOKIE_SECURE = True\n\n# to display datetimes in templates\n# and to interpret datetimes entered in forms\nTIME_ZONE = \"UTC\"\n\n# enable Django’s translation system\nUSE_I18N = True\n\n# enable localized formatting of numbers and dates\nUSE_L10N = True\n\n# make datetimes timezone-aware by default\nUSE_TZ = True\n\n# setting for phonenumber library\nPHONENUMBER_DEFAULT_REGION = \"US\"\n\n# endregion\n# region: Logging-----------------------------------------------------------###\n\n# A Python logging configuration consists of four parts:\n# Loggers\n# Handlers\n# Filters\n# Formatters\n# https://docs.djangoproject.com/en/4.1/topics/logging/\n\n# Log a message by doing this:\n#\n# import logging\n# logger = logging.getLogger(__name__)\n#\n# Then:\n#\n# logger.debug(\"We're about to execute function xyz. Wish us luck!\")\n# logger.info(\"Oh! Here's something you might want to know.\")\n# logger.warning(\"Something kinda bad happened.\")\n# logger.error(\"Can't do this important task. 
Something is very wrong.\")\n# logger.critical(\"Going to crash now.\")\n\nLOGGING = {\n \"version\": 1,\n # Don't import Django's existing loggers\n \"disable_existing_loggers\": True,\n # define how to convert log messages into text;\n # each handler has its choice of format\n \"formatters\": {\n \"verbose\": {\n \"format\": \"[%(asctime)s] %(levelname)s [%(name)s:%(lineno)s] \"\n \"%(message)s\",\n \"datefmt\": \"%d/%b/%Y %H:%M:%S\",\n },\n \"simple\": {\n \"format\": \"%(levelname)s %(message)s\",\n },\n \"django.server\": {\n \"()\": \"django.utils.log.ServerFormatter\",\n \"format\": \"[{server_time}] {message}\",\n \"style\": \"{\",\n },\n },\n # define where log messages will be sent;\n # each logger can have one or more handlers\n \"handlers\": {\n \"console\": {\n \"level\": env_log_level,\n \"class\": \"logging.StreamHandler\",\n \"formatter\": \"verbose\",\n },\n \"django.server\": {\n \"level\": \"INFO\",\n \"class\": \"logging.StreamHandler\",\n \"formatter\": \"django.server\",\n },\n # No file logger is configured,\n # because containerized apps\n # do not log to the file system.\n },\n # define loggers: these are \"sinks\" into which\n # messages are sent for processing\n \"loggers\": {\n # Django's generic logger\n \"django\": {\n \"handlers\": [\"console\"],\n \"level\": \"INFO\",\n \"propagate\": False,\n },\n # Django's template processor\n \"django.template\": {\n \"handlers\": [\"console\"],\n \"level\": \"INFO\",\n \"propagate\": False,\n },\n # Django's runserver\n \"django.server\": {\n \"handlers\": [\"django.server\"],\n \"level\": \"INFO\",\n \"propagate\": False,\n },\n # Django's runserver requests\n \"django.request\": {\n \"handlers\": [\"django.server\"],\n \"level\": \"INFO\",\n \"propagate\": False,\n },\n # OpenID Connect logger\n \"oic\": {\n \"handlers\": [\"console\"],\n \"level\": \"INFO\",\n \"propagate\": False,\n },\n # Django wrapper for OpenID Connect\n \"djangooidc\": {\n \"handlers\": [\"console\"],\n \"level\": \"INFO\",\n \"propagate\": False,\n },\n # Our app!\n \"registrar\": {\n \"handlers\": [\"console\"],\n \"level\": \"DEBUG\",\n \"propagate\": False,\n },\n },\n # root logger catches anything, unless\n # defined by a more specific logger\n \"root\": {\n \"handlers\": [\"console\"],\n \"level\": \"INFO\",\n },\n}\n\n# endregion\n# region: Login-------------------------------------------------------------###\n\n# list of Python classes used when trying to authenticate a user\nAUTHENTICATION_BACKENDS = [\n \"django.contrib.auth.backends.ModelBackend\",\n \"djangooidc.backends.OpenIdConnectBackend\",\n]\n\n# this is where unauthenticated requests are redirected when using\n# the login_required() decorator, LoginRequiredMixin, or AccessMixin\nLOGIN_URL = \"/openid/login\"\n\n# where to go after logging out\nLOGOUT_REDIRECT_URL = \"home\"\n\n# disable dynamic client registration,\n# only the OP inside OIDC_PROVIDERS will be available\nOIDC_ALLOW_DYNAMIC_OP = False\n\n# which provider to use if multiple are available\n# (code does not currently support user selection)\nOIDC_ACTIVE_PROVIDER = \"login.gov\"\n\n\nOIDC_PROVIDERS = {\n \"login.gov\": {\n \"srv_discovery_url\": \"https://idp.int.identitysandbox.gov\",\n \"behaviour\": {\n # the 'code' workflow requires direct connectivity from us to Login.gov\n \"response_type\": \"code\",\n \"scope\": [\"email\", \"profile:name\", \"phone\"],\n \"user_info_request\": [\"email\", \"first_name\", \"last_name\", \"phone\"],\n \"acr_value\": \"http://idmanagement.gov/ns/assurance/ial/2\",\n },\n 
\"client_registration\": {\n \"client_id\": \"cisa_dotgov_registrar\",\n \"redirect_uris\": [f\"{env_base_url}/openid/callback/login/\"],\n \"post_logout_redirect_uris\": [f\"{env_base_url}/openid/callback/logout/\"],\n \"token_endpoint_auth_method\": [\"private_key_jwt\"],\n \"sp_private_key\": secret_login_key,\n },\n }\n}\n\n# endregion\n# region: Routing-----------------------------------------------------------###\n\n# ~ Set by django.middleware.common.CommonMiddleware\n# APPEND_SLASH = True\n# PREPEND_WWW = False\n\n# full Python import path to the root URLconf\nROOT_URLCONF = \"registrar.config.urls\"\n\n# URL to use when referring to static files located in STATIC_ROOT\n# Must be relative and end with \"/\"\nSTATIC_URL = \"public/\"\n\n# endregion\n# region: Registry----------------------------------------------------------###\n\n# SECURITY WARNING: keep all registry variables in production secret!\nSECRET_REGISTRY_CL_ID = secret_registry_cl_id\nSECRET_REGISTRY_PASSWORD = secret_registry_password\nSECRET_REGISTRY_CERT = secret_registry_cert\nSECRET_REGISTRY_KEY = secret_registry_key\nSECRET_REGISTRY_KEY_PASSPHRASE = secret_registry_key_passphrase\nSECRET_REGISTRY_HOSTNAME = secret_registry_hostname\n\n# endregion\n# region: Security and Privacy----------------------------------------------###\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = secret_key\n\n# Use this variable for doing SECRET_KEY rotation, see documentation\nSECRET_KEY_FALLBACKS: \"list[str]\" = []\n\n# ~ Set by django.middleware.security.SecurityMiddleware\n# SECURE_CONTENT_TYPE_NOSNIFF = True\n# SECURE_CROSS_ORIGIN_OPENER_POLICY = \"same-origin\"\n# SECURE_REDIRECT_EXEMPT = []\n# SECURE_REFERRER_POLICY = \"same-origin\"\n# SECURE_SSL_HOST = None\n\n# ~ Overridden from django.middleware.security.SecurityMiddleware\n# adds the includeSubDomains directive to the HTTP Strict Transport Security header\nSECURE_HSTS_INCLUDE_SUBDOMAINS = True\n# adds the preload directive to the HTTP Strict Transport Security header\nSECURE_HSTS_PRELOAD = True\n# TODO: set this value to 31536000 (1 year) for production\nSECURE_HSTS_SECONDS = 300\n# redirect all non-HTTPS requests to HTTPS\nSECURE_SSL_REDIRECT = True\n\n# ~ Set by django.middleware.common.CommonMiddleware\n# DISALLOWED_USER_AGENTS = []\n\n# The host/domain names that Django can serve.\n# This is a security measure to prevent HTTP Host header attacks,\n# which are possible even under many seemingly-safe\n# web server configurations.\nALLOWED_HOSTS = [\n \"getgov-stable.app.cloud.gov\",\n \"getgov-ab.app.cloud.gov\",\n \"getgov-bl.app.cloud.gov\",\n \"getgov-rjm.app.cloud.gov\",\n \"getgov-jon.app.cloud.gov\",\n \"getgov-mr.app.cloud.gov\",\n \"getgov-sspj.app.cloud.gov\",\n \"getgov-nmb.app.cloud.gov\",\n \"getgov-ik.app.cloud.gov\",\n \"get.gov\",\n]\n\n# Extend ALLOWED_HOSTS.\n# IP addresses can also be hosts, which are used by internal\n# load balancers for health checks, etc.\nALLOWED_CIDR_NETS = [\"10.0.0.0/8\"]\n\n# ~ Below are some protections from cross-site request forgery.\n# This is canonically done by including a nonce value\n# in pages sent to the user, which the user is expected\n# to send back. The specifics of implementation are\n# intricate and varied.\n\n# Store the token server-side, do not send it\n# to the user via a cookie. 
This means each page\n# which requires protection must place the token\n# in the HTML explicitly, otherwise the user will\n# get a 403 error when they submit.\nCSRF_USE_SESSIONS = True\n\n# Expiry of CSRF cookie, in seconds.\n# None means \"use session-based CSRF cookies\".\nCSRF_COOKIE_AGE = None\n\n# Prevent JavaScript from reading the CSRF cookie.\n# Has no effect with CSRF_USE_SESSIONS = True.\nCSRF_COOKIE_HTTPONLY = True\n\n# Only send the cookie via HTTPS connections.\n# Has no effect with CSRF_USE_SESSIONS = True.\nCSRF_COOKIE_SECURE = True\n\n# Protect from non-targeted attacks by obscuring\n# the CSRF cookie name from the default.\n# Has no effect with CSRF_USE_SESSIONS = True.\nCSRF_COOKIE_NAME = \"CrSiReFo\"\n\n# Prevents CSRF cookie from being sent if the user\n# is coming to our site from an external page.\n# Has no effect with CSRF_USE_SESSIONS = True.\nCSRF_COOKIE_SAMESITE = \"Strict\"\n\n# Change header name to match cookie name.\n# Has no effect with CSRF_USE_SESSIONS = True.\nCSRF_HEADER_NAME = \"HTTP_X_CRSIREFO\"\n\n# Max parameters that may be received via GET or POST\n# TODO: 1000 is the default, may need to tune upward for\n# large DNS zone files, if records are represented by\n# individual form fields.\nDATA_UPLOAD_MAX_NUMBER_FIELDS = 1000\n\n# age of session cookies, in seconds (28800 = 8 hours)\nSESSION_COOKIE_AGE = 28800\n\n# instruct the browser to forbid client-side JavaScript\n# from accessing the cookie\nSESSION_COOKIE_HTTPONLY = True\n\n# are we a spring boot application? who knows!\nSESSION_COOKIE_NAME = \"JSESSIONID\"\n\n# Allows session cookie to be sent if the user\n# is coming to our site from an external page\n# unless it is via \"risky\" paths, i.e. POST requests\nSESSION_COOKIE_SAMESITE = \"Lax\"\n\n# instruct browser to only send cookie via HTTPS\nSESSION_COOKIE_SECURE = True\n\n# ~ Set by django.middleware.clickjacking.XFrameOptionsMiddleware\n# prevent clickjacking by instructing the browser not to load\n# our site within an iframe\n# X_FRAME_OPTIONS = \"Deny\"\n\n# endregion\n# region: Testing-----------------------------------------------------------###\n\n# Additional directories searched for fixture files.\n# The fixtures directory of each application is searched by default.\n# Must use unix style \"/\" path separators.\nFIXTURE_DIRS: \"list[str]\" = []\n\n# endregion\n\n\n# # # ###\n# Development settings #\n# # # ###\n\nif DEBUG:\n # used by debug() context processor\n INTERNAL_IPS = [\n \"127.0.0.1\",\n \"::1\",\n ]\n\n # allow dev laptop and docker-compose network to connect\n ALLOWED_HOSTS += (\"localhost\", \"app\")\n SECURE_SSL_REDIRECT = False\n SECURE_HSTS_PRELOAD = False\n\n # discover potentially inefficient database queries\n # TODO: use settings overrides to ensure this always is True during tests\n INSTALLED_APPS += (\"nplusone.ext.django\",)\n MIDDLEWARE += (\"nplusone.ext.django.NPlusOneMiddleware\",)\n # turned off for now, because django-auditlog has some issues\n NPLUSONE_RAISE = False\n NPLUSONE_WHITELIST = [\n {\"model\": \"admin.LogEntry\", \"field\": \"user\"},\n ]\n\n # insert the amazing django-debug-toolbar\n INSTALLED_APPS += (\"debug_toolbar\",)\n MIDDLEWARE.insert(0, \"debug_toolbar.middleware.DebugToolbarMiddleware\")\n\n DEBUG_TOOLBAR_CONFIG = {\n # due to Docker, bypass Debug Toolbar's check on INTERNAL_IPS\n \"SHOW_TOOLBAR_CALLBACK\": lambda _: True,\n }\n",
"path": "src/registrar/config/settings.py"
}
] | [
{
"content": "\"\"\"\nDjango settings for .gov registrar project.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/4.0/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/4.0/ref/settings/\n\nIF you'd like to see all of these settings in the running app:\n\n```shell\n$ docker-compose exec app python manage.py shell\n>>> from django.conf import settings\n>>> dir(settings)\n```\n\n\"\"\"\nimport environs\nfrom base64 import b64decode\nfrom cfenv import AppEnv # type: ignore\nfrom pathlib import Path\nfrom typing import Final\n\nfrom botocore.config import Config\n\n# # # ###\n# Setup code goes here #\n# # # ###\n\nenv = environs.Env()\n\n# Get secrets from Cloud.gov user provided service, if exists\n# If not, get secrets from environment variables\nkey_service = AppEnv().get_service(name=\"getgov-credentials\")\nif key_service and key_service.credentials:\n secret = key_service.credentials.get\nelse:\n secret = env\n\n# # # ###\n# Values obtained externally #\n# # # ###\n\npath = Path(__file__)\n\nenv_db_url = env.dj_db_url(\"DATABASE_URL\")\nenv_debug = env.bool(\"DJANGO_DEBUG\", default=False)\nenv_log_level = env.str(\"DJANGO_LOG_LEVEL\", \"DEBUG\")\nenv_base_url = env.str(\"DJANGO_BASE_URL\")\n\nsecret_login_key = b64decode(secret(\"DJANGO_SECRET_LOGIN_KEY\", \"\"))\nsecret_key = secret(\"DJANGO_SECRET_KEY\")\n\nsecret_aws_ses_key_id = secret(\"AWS_ACCESS_KEY_ID\", None)\nsecret_aws_ses_key = secret(\"AWS_SECRET_ACCESS_KEY\", None)\n\nsecret_registry_cl_id = secret(\"REGISTRY_CL_ID\")\nsecret_registry_password = secret(\"REGISTRY_PASSWORD\")\nsecret_registry_cert = b64decode(secret(\"REGISTRY_CERT\", \"\"))\nsecret_registry_key = b64decode(secret(\"REGISTRY_KEY\", \"\"))\nsecret_registry_key_passphrase = secret(\"REGISTRY_KEY_PASSPHRASE\", \"\")\nsecret_registry_hostname = secret(\"REGISTRY_HOSTNAME\")\n\n# region: Basic Django Config-----------------------------------------------###\n\n# Build paths inside the project like this: BASE_DIR / \"subdir\".\n# (settings.py is in `src/registrar/config/`: BASE_DIR is `src/`)\nBASE_DIR = path.resolve().parent.parent.parent\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = env_debug\n\n\n# Applications are modular pieces of code.\n# They are provided by Django, by third-parties, or by yourself.\n# Installing them here makes them available for execution.\n# Do not access INSTALLED_APPS directly. Use `django.apps.apps` instead.\nINSTALLED_APPS = [\n # Django automatic admin interface reads metadata\n # from database models to provide a quick, model-centric\n # interface where trusted users can manage content\n \"django.contrib.admin\",\n # vv Required by django.contrib.admin vv\n # the \"user\" model! 
*\\o/*\n \"django.contrib.auth\",\n # generic interface for Django models\n \"django.contrib.contenttypes\",\n # required for CSRF protection and many other things\n \"django.contrib.sessions\",\n # framework for displaying messages to the user\n \"django.contrib.messages\",\n # ^^ Required by django.contrib.admin ^^\n # collects static files from each of your applications\n # (and any other places you specify) into a single location\n # that can easily be served in production\n \"django.contrib.staticfiles\",\n # application used for integrating with Login.gov\n \"djangooidc\",\n # audit logging of changes to models\n \"auditlog\",\n # library to simplify form templating\n \"widget_tweaks\",\n # library for Finite State Machine statuses\n \"django_fsm\",\n # library for phone numbers\n \"phonenumber_field\",\n # let's be sure to install our own application!\n \"registrar\",\n # Our internal API application\n \"api\",\n # Only for generating documentation, uncomment to run manage.py generate_puml\n # \"puml_generator\",\n]\n\n# Middleware are routines for processing web requests.\n# Adding them here turns them \"on\"; Django will perform the\n# specified routines on each incoming request and outgoing response.\nMIDDLEWARE = [\n # django-allow-cidr: enable use of CIDR IP ranges in ALLOWED_HOSTS\n \"allow_cidr.middleware.AllowCIDRMiddleware\",\n # serve static assets in production\n \"whitenoise.middleware.WhiteNoiseMiddleware\",\n # provide security enhancements to the request/response cycle\n \"django.middleware.security.SecurityMiddleware\",\n # store and retrieve arbitrary data on a per-site-visitor basis\n \"django.contrib.sessions.middleware.SessionMiddleware\",\n # add a few conveniences for perfectionists, see documentation\n \"django.middleware.common.CommonMiddleware\",\n # add protection against Cross Site Request Forgeries by adding\n # hidden form fields to POST forms and checking requests for the correct value\n \"django.middleware.csrf.CsrfViewMiddleware\",\n # add `user` (the currently-logged-in user) to incoming HttpRequest objects\n \"django.contrib.auth.middleware.AuthenticationMiddleware\",\n # provide framework for displaying messages to the user, see documentation\n \"django.contrib.messages.middleware.MessageMiddleware\",\n # provide clickjacking protection via the X-Frame-Options header\n \"django.middleware.clickjacking.XFrameOptionsMiddleware\",\n # django-csp: enable use of Content-Security-Policy header\n \"csp.middleware.CSPMiddleware\",\n # django-auditlog: obtain the request User for use in logging\n \"auditlog.middleware.AuditlogMiddleware\",\n]\n\n# application object used by Django’s built-in servers (e.g. `runserver`)\nWSGI_APPLICATION = \"registrar.config.wsgi.application\"\n\n# endregion\n# region: Assets and HTML and Caching---------------------------------------###\n\n# https://docs.djangoproject.com/en/4.0/howto/static-files/\n\n\n# Caching is disabled by default.\n# For a low to medium traffic site, caching causes more\n# problems than it solves. 
Should caching be desired,\n# a reasonable start might be:\n# CACHES = {\n# \"default\": {\n# \"BACKEND\": \"django.core.cache.backends.db.DatabaseCache\",\n# }\n# }\n\n# Absolute path to the directory where `collectstatic`\n# will place static files for deployment.\n# Do not use this directory for permanent storage -\n# it is for Django!\nSTATIC_ROOT = BASE_DIR / \"registrar\" / \"public\"\n\nSTATICFILES_DIRS = [\n BASE_DIR / \"registrar\" / \"assets\",\n]\n\nTEMPLATES = [\n {\n \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n \"DIRS\": [BASE_DIR / \"registrar\" / \"templates\"],\n # look for templates inside installed apps\n # required by django-debug-toolbar\n \"APP_DIRS\": True,\n \"OPTIONS\": {\n # IMPORTANT security setting: escapes HTMLEntities,\n # helping to prevent XSS attacks\n \"autoescape\": True,\n # context processors are callables which return\n # dicts - Django merges them into the context\n # dictionary used to render the templates\n \"context_processors\": [\n \"django.template.context_processors.debug\",\n \"django.template.context_processors.request\",\n \"django.contrib.auth.context_processors.auth\",\n \"django.contrib.messages.context_processors.messages\",\n \"registrar.context_processors.language_code\",\n \"registrar.context_processors.canonical_path\",\n \"registrar.context_processors.is_demo_site\",\n ],\n },\n },\n]\n\n# Stop using table-based default form renderer which is deprecated\nFORM_RENDERER = \"django.forms.renderers.DjangoDivFormRenderer\"\n\nMESSAGE_STORAGE = \"django.contrib.messages.storage.session.SessionStorage\"\n\n# IS_DEMO_SITE controls whether or not we show our big red \"TEST SITE\" banner\n# underneath the \"this is a real government website\" banner.\nIS_DEMO_SITE = True\n\n# endregion\n# region: Database----------------------------------------------------------###\n\n# Wrap each view in a transaction on the database\n# A decorator can be used for views which have no database activity:\n# from django.db import transaction\n# @transaction.non_atomic_requests\nenv_db_url[\"ATOMIC_REQUESTS\"] = True\n\nDATABASES = {\n # dj-database-url package takes the supplied Postgres connection string\n # and converts it into a dictionary with the correct USER, HOST, etc\n \"default\": env_db_url,\n}\n\n# Specify default field type to use for primary keys\nDEFAULT_AUTO_FIELD = \"django.db.models.BigAutoField\"\n\n# Use our user model instead of the default\nAUTH_USER_MODEL = \"registrar.User\"\n\n# endregion\n# region: Email-------------------------------------------------------------###\n\n# Configuration for accessing AWS SES\nAWS_ACCESS_KEY_ID = secret_aws_ses_key_id\nAWS_SECRET_ACCESS_KEY = secret_aws_ses_key\nAWS_REGION = \"us-gov-west-1\"\n# https://boto3.amazonaws.com/v1/documentation/api/latest/guide/retries.html#standard-retry-mode\nAWS_RETRY_MODE: Final = \"standard\"\n# base 2 exponential backoff with max of 20 seconds:\nAWS_MAX_ATTEMPTS = 3\nBOTO_CONFIG = Config(retries={\"mode\": AWS_RETRY_MODE, \"max_attempts\": AWS_MAX_ATTEMPTS})\n\n# email address to use for various automated correspondence\n# TODO: pick something sensible here\nDEFAULT_FROM_EMAIL = \"[email protected]\"\n\n# connect to an (external) SMTP server for sending email\nEMAIL_BACKEND = \"django.core.mail.backends.smtp.EmailBackend\"\n\n# TODO: configure these when the values are known\n# EMAIL_HOST = \"\"\n# EMAIL_HOST_PASSWORD = \"\"\n# EMAIL_HOST_USER = \"\"\n# EMAIL_PORT = 587\n\n# for mail sent with mail_admins or mail_managers\nEMAIL_SUBJECT_PREFIX = 
\"[Attn: .gov admin] \"\n\n# use a TLS (secure) connection when talking to the SMTP server\n# TLS generally uses port 587\nEMAIL_USE_TLS = True\n\n# mutually exclusive with EMAIL_USE_TLS = True\n# SSL generally uses port 465\nEMAIL_USE_SSL = False\n\n# timeout in seconds for blocking operations, like the connection attempt\nEMAIL_TIMEOUT = 30\n\n# email address to use for sending error reports\nSERVER_EMAIL = \"[email protected]\"\n\n# endregion\n# region: Headers-----------------------------------------------------------###\n\n# Content-Security-Policy configuration\n# this can be restrictive because we have few external scripts\nallowed_sources = (\"'self'\",)\nCSP_DEFAULT_SRC = allowed_sources\n# Most things fall back to default-src, but these two do not and should be\n# explicitly set\nCSP_FRAME_ANCESTORS = allowed_sources\nCSP_FORM_ACTION = allowed_sources\n\n\n# Content-Length header is set by django.middleware.common.CommonMiddleware\n\n# X-Frame-Options header is set by\n# django.middleware.clickjacking.XFrameOptionsMiddleware\n# and configured in the Security and Privacy section of this file.\n# Strict-Transport-Security is set by django.middleware.security.SecurityMiddleware\n# and configured in the Security and Privacy section of this file.\n\n# prefer contents of X-Forwarded-Host header to Host header\n# as Host header may contain a proxy rather than the actual client\nUSE_X_FORWARDED_HOST = True\n\n# endregion\n# region: Internationalisation----------------------------------------------###\n\n# https://docs.djangoproject.com/en/4.0/topics/i18n/\n\n# Charset to use for HttpResponse objects; used in Content-Type header\nDEFAULT_CHARSET = \"utf-8\"\n\n# provide fallback language if translation file is missing or\n# user's locale is not supported - requires USE_I18N = True\nLANGUAGE_CODE = \"en-us\"\n\n# allows language cookie to be sent if the user\n# is coming to our site from an external page.\nLANGUAGE_COOKIE_SAMESITE = None\n\n# only send via HTTPS connection\nLANGUAGE_COOKIE_SECURE = True\n\n# to display datetimes in templates\n# and to interpret datetimes entered in forms\nTIME_ZONE = \"UTC\"\n\n# enable Django’s translation system\nUSE_I18N = True\n\n# enable localized formatting of numbers and dates\nUSE_L10N = True\n\n# make datetimes timezone-aware by default\nUSE_TZ = True\n\n# setting for phonenumber library\nPHONENUMBER_DEFAULT_REGION = \"US\"\n\n# endregion\n# region: Logging-----------------------------------------------------------###\n\n# A Python logging configuration consists of four parts:\n# Loggers\n# Handlers\n# Filters\n# Formatters\n# https://docs.djangoproject.com/en/4.1/topics/logging/\n\n# Log a message by doing this:\n#\n# import logging\n# logger = logging.getLogger(__name__)\n#\n# Then:\n#\n# logger.debug(\"We're about to execute function xyz. Wish us luck!\")\n# logger.info(\"Oh! Here's something you might want to know.\")\n# logger.warning(\"Something kinda bad happened.\")\n# logger.error(\"Can't do this important task. 
Something is very wrong.\")\n# logger.critical(\"Going to crash now.\")\n\nLOGGING = {\n \"version\": 1,\n # Don't import Django's existing loggers\n \"disable_existing_loggers\": True,\n # define how to convert log messages into text;\n # each handler has its choice of format\n \"formatters\": {\n \"verbose\": {\n \"format\": \"[%(asctime)s] %(levelname)s [%(name)s:%(lineno)s] \"\n \"%(message)s\",\n \"datefmt\": \"%d/%b/%Y %H:%M:%S\",\n },\n \"simple\": {\n \"format\": \"%(levelname)s %(message)s\",\n },\n \"django.server\": {\n \"()\": \"django.utils.log.ServerFormatter\",\n \"format\": \"[{server_time}] {message}\",\n \"style\": \"{\",\n },\n },\n # define where log messages will be sent;\n # each logger can have one or more handlers\n \"handlers\": {\n \"console\": {\n \"level\": env_log_level,\n \"class\": \"logging.StreamHandler\",\n \"formatter\": \"verbose\",\n },\n \"django.server\": {\n \"level\": \"INFO\",\n \"class\": \"logging.StreamHandler\",\n \"formatter\": \"django.server\",\n },\n # No file logger is configured,\n # because containerized apps\n # do not log to the file system.\n },\n # define loggers: these are \"sinks\" into which\n # messages are sent for processing\n \"loggers\": {\n # Django's generic logger\n \"django\": {\n \"handlers\": [\"console\"],\n \"level\": \"INFO\",\n \"propagate\": False,\n },\n # Django's template processor\n \"django.template\": {\n \"handlers\": [\"console\"],\n \"level\": \"INFO\",\n \"propagate\": False,\n },\n # Django's runserver\n \"django.server\": {\n \"handlers\": [\"django.server\"],\n \"level\": \"INFO\",\n \"propagate\": False,\n },\n # Django's runserver requests\n \"django.request\": {\n \"handlers\": [\"django.server\"],\n \"level\": \"INFO\",\n \"propagate\": False,\n },\n # OpenID Connect logger\n \"oic\": {\n \"handlers\": [\"console\"],\n \"level\": \"INFO\",\n \"propagate\": False,\n },\n # Django wrapper for OpenID Connect\n \"djangooidc\": {\n \"handlers\": [\"console\"],\n \"level\": \"INFO\",\n \"propagate\": False,\n },\n # Our app!\n \"registrar\": {\n \"handlers\": [\"console\"],\n \"level\": \"DEBUG\",\n \"propagate\": False,\n },\n },\n # root logger catches anything, unless\n # defined by a more specific logger\n \"root\": {\n \"handlers\": [\"console\"],\n \"level\": \"INFO\",\n },\n}\n\n# endregion\n# region: Login-------------------------------------------------------------###\n\n# list of Python classes used when trying to authenticate a user\nAUTHENTICATION_BACKENDS = [\n \"django.contrib.auth.backends.ModelBackend\",\n \"djangooidc.backends.OpenIdConnectBackend\",\n]\n\n# this is where unauthenticated requests are redirected when using\n# the login_required() decorator, LoginRequiredMixin, or AccessMixin\nLOGIN_URL = \"/openid/login\"\n\n# where to go after logging out\nLOGOUT_REDIRECT_URL = \"home\"\n\n# disable dynamic client registration,\n# only the OP inside OIDC_PROVIDERS will be available\nOIDC_ALLOW_DYNAMIC_OP = False\n\n# which provider to use if multiple are available\n# (code does not currently support user selection)\nOIDC_ACTIVE_PROVIDER = \"login.gov\"\n\n\nOIDC_PROVIDERS = {\n \"login.gov\": {\n \"srv_discovery_url\": \"https://idp.int.identitysandbox.gov\",\n \"behaviour\": {\n # the 'code' workflow requires direct connectivity from us to Login.gov\n \"response_type\": \"code\",\n \"scope\": [\"email\", \"profile:name\", \"phone\"],\n \"user_info_request\": [\"email\", \"first_name\", \"last_name\", \"phone\"],\n \"acr_value\": \"http://idmanagement.gov/ns/assurance/ial/2\",\n },\n 
\"client_registration\": {\n \"client_id\": \"cisa_dotgov_registrar\",\n \"redirect_uris\": [f\"{env_base_url}/openid/callback/login/\"],\n \"post_logout_redirect_uris\": [f\"{env_base_url}/openid/callback/logout/\"],\n \"token_endpoint_auth_method\": [\"private_key_jwt\"],\n \"sp_private_key\": secret_login_key,\n },\n }\n}\n\n# endregion\n# region: Routing-----------------------------------------------------------###\n\n# ~ Set by django.middleware.common.CommonMiddleware\n# APPEND_SLASH = True\n# PREPEND_WWW = False\n\n# full Python import path to the root URLconf\nROOT_URLCONF = \"registrar.config.urls\"\n\n# URL to use when referring to static files located in STATIC_ROOT\n# Must be relative and end with \"/\"\nSTATIC_URL = \"public/\"\n\n# endregion\n# region: Registry----------------------------------------------------------###\n\n# SECURITY WARNING: keep all registry variables in production secret!\nSECRET_REGISTRY_CL_ID = secret_registry_cl_id\nSECRET_REGISTRY_PASSWORD = secret_registry_password\nSECRET_REGISTRY_CERT = secret_registry_cert\nSECRET_REGISTRY_KEY = secret_registry_key\nSECRET_REGISTRY_KEY_PASSPHRASE = secret_registry_key_passphrase\nSECRET_REGISTRY_HOSTNAME = secret_registry_hostname\n\n# endregion\n# region: Security and Privacy----------------------------------------------###\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = secret_key\n\n# Use this variable for doing SECRET_KEY rotation, see documentation\nSECRET_KEY_FALLBACKS: \"list[str]\" = []\n\n# ~ Set by django.middleware.security.SecurityMiddleware\n# SECURE_CONTENT_TYPE_NOSNIFF = True\n# SECURE_CROSS_ORIGIN_OPENER_POLICY = \"same-origin\"\n# SECURE_REDIRECT_EXEMPT = []\n# SECURE_REFERRER_POLICY = \"same-origin\"\n# SECURE_SSL_HOST = None\n\n# ~ Overridden from django.middleware.security.SecurityMiddleware\n# adds the includeSubDomains directive to the HTTP Strict Transport Security header\nSECURE_HSTS_INCLUDE_SUBDOMAINS = True\n# adds the preload directive to the HTTP Strict Transport Security header\nSECURE_HSTS_PRELOAD = True\n# TODO: set this value to 31536000 (1 year) for production\nSECURE_HSTS_SECONDS = 300\n# redirect all non-HTTPS requests to HTTPS\nSECURE_SSL_REDIRECT = True\n\n# ~ Set by django.middleware.common.CommonMiddleware\n# DISALLOWED_USER_AGENTS = []\n\n# The host/domain names that Django can serve.\n# This is a security measure to prevent HTTP Host header attacks,\n# which are possible even under many seemingly-safe\n# web server configurations.\nALLOWED_HOSTS = [\n \"getgov-stable.app.cloud.gov\",\n \"getgov-ab.app.cloud.gov\",\n \"getgov-bl.app.cloud.gov\",\n \"getgov-rjm.app.cloud.gov\",\n \"getgov-jon.app.cloud.gov\",\n \"getgov-mr.app.cloud.gov\",\n \"getgov-sspj.app.cloud.gov\",\n \"getgov-nmb.app.cloud.gov\",\n \"getgov-ik.app.cloud.gov\",\n \"get.gov\",\n]\n\n# Extend ALLOWED_HOSTS.\n# IP addresses can also be hosts, which are used by internal\n# load balancers for health checks, etc.\nALLOWED_CIDR_NETS = [\"10.0.0.0/8\"]\n\n# ~ Below are some protections from cross-site request forgery.\n# This is canonically done by including a nonce value\n# in pages sent to the user, which the user is expected\n# to send back. The specifics of implementation are\n# intricate and varied.\n\n# Store the token server-side, do not send it\n# to the user via a cookie. 
This means each page\n# which requires protection must place the token\n# in the HTML explicitly, otherwise the user will\n# get a 403 error when they submit.\nCSRF_USE_SESSIONS = True\n\n# Expiry of CSRF cookie, in seconds.\n# None means \"use session-based CSRF cookies\".\nCSRF_COOKIE_AGE = None\n\n# Prevent JavaScript from reading the CSRF cookie.\n# Has no effect with CSRF_USE_SESSIONS = True.\nCSRF_COOKIE_HTTPONLY = True\n\n# Only send the cookie via HTTPS connections.\n# Has no effect with CSRF_USE_SESSIONS = True.\nCSRF_COOKIE_SECURE = True\n\n# Protect from non-targeted attacks by obscuring\n# the CSRF cookie name from the default.\n# Has no effect with CSRF_USE_SESSIONS = True.\nCSRF_COOKIE_NAME = \"CrSiReFo\"\n\n# Prevents CSRF cookie from being sent if the user\n# is coming to our site from an external page.\n# Has no effect with CSRF_USE_SESSIONS = True.\nCSRF_COOKIE_SAMESITE = \"Strict\"\n\n# Change header name to match cookie name.\n# Has no effect with CSRF_USE_SESSIONS = True.\nCSRF_HEADER_NAME = \"HTTP_X_CRSIREFO\"\n\n# Max parameters that may be received via GET or POST\n# TODO: 1000 is the default, may need to tune upward for\n# large DNS zone files, if records are represented by\n# individual form fields.\nDATA_UPLOAD_MAX_NUMBER_FIELDS = 1000\n\n# age of session cookies, in seconds (28800 = 8 hours)\nSESSION_COOKIE_AGE = 28800\n\n# instruct the browser to forbid client-side JavaScript\n# from accessing the cookie\nSESSION_COOKIE_HTTPONLY = True\n\n# are we a spring boot application? who knows!\nSESSION_COOKIE_NAME = \"JSESSIONID\"\n\n# Allows session cookie to be sent if the user\n# is coming to our site from an external page\n# unless it is via \"risky\" paths, i.e. POST requests\nSESSION_COOKIE_SAMESITE = \"Lax\"\n\n# instruct browser to only send cookie via HTTPS\nSESSION_COOKIE_SECURE = True\n\n# ~ Set by django.middleware.clickjacking.XFrameOptionsMiddleware\n# prevent clickjacking by instructing the browser not to load\n# our site within an iframe\n# X_FRAME_OPTIONS = \"Deny\"\n\n# endregion\n# region: Testing-----------------------------------------------------------###\n\n# Additional directories searched for fixture files.\n# The fixtures directory of each application is searched by default.\n# Must use unix style \"/\" path separators.\nFIXTURE_DIRS: \"list[str]\" = []\n\n# endregion\n\n\n# # # ###\n# Development settings #\n# # # ###\n\nif DEBUG:\n # used by debug() context processor\n INTERNAL_IPS = [\n \"127.0.0.1\",\n \"::1\",\n ]\n\n # allow dev laptop and docker-compose network to connect\n ALLOWED_HOSTS += (\"localhost\", \"app\")\n SECURE_SSL_REDIRECT = False\n SECURE_HSTS_PRELOAD = False\n\n # discover potentially inefficient database queries\n # TODO: use settings overrides to ensure this always is True during tests\n INSTALLED_APPS += (\"nplusone.ext.django\",)\n MIDDLEWARE += (\"nplusone.ext.django.NPlusOneMiddleware\",)\n # turned off for now, because django-auditlog has some issues\n NPLUSONE_RAISE = False\n NPLUSONE_WHITELIST = [\n {\"model\": \"admin.LogEntry\", \"field\": \"user\"},\n ]\n\n # insert the amazing django-debug-toolbar\n INSTALLED_APPS += (\"debug_toolbar\",)\n MIDDLEWARE.insert(0, \"debug_toolbar.middleware.DebugToolbarMiddleware\")\n\n DEBUG_TOOLBAR_CONFIG = {\n # due to Docker, bypass Debug Toolbar's check on INTERNAL_IPS\n \"SHOW_TOOLBAR_CALLBACK\": lambda _: True,\n }\n",
"path": "src/registrar/config/settings.py"
}
] | diff --git a/docs/architecture/diagrams/model_timeline.md b/docs/architecture/diagrams/model_timeline.md
new file mode 100644
index 000000000..f2089ce55
--- /dev/null
+++ b/docs/architecture/diagrams/model_timeline.md
@@ -0,0 +1,149 @@
+# Data Model Timeline
+
+This diagram connects the data models along with various workflow stages.
+
+1. The applicant starts the process at `/register`, interacting with the
+   `DomainApplication` object.
+
+2. The analyst approves the application using the `DomainApplication`'s
+   `approve()` method, which creates many related objects: `UserDomainRole`,
+   `Domain`, and `DomainInformation` (see the sketch after this list).
+
+3. After the domain is approved, users interact with various
+ `/domain/<id>/...` views which make changes to the `Domain`,
+ `DomainInformation`, and `UserDomainRole` models. For inviting new users,
+   there is a `DomainInvitation` model that allows people who are not yet
+   users to be added to domains.
+
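+To make step 2 concrete, here is a minimal sketch of what `approve()` plausibly
+does, based only on the relationships shown in this diagram. The manager calls,
+field names, and status value are assumptions for illustration; the real
+`DomainApplication` model is the authority.
+
+```python
+from django.db import transaction
+
+from registrar.models import Domain, DomainInformation, UserDomainRole
+
+
+@transaction.atomic
+def approve(application):
+    """Sketch of DomainApplication.approve() (hypothetical, simplified)."""
+    # Promote the requested domain into a live Domain record.
+    domain, _ = Domain.objects.get_or_create(name=application.requested_domain.name)
+    # Snapshot the application as registrar-facing DomainInformation.
+    DomainInformation.objects.create(domain=domain, domain_application=application)
+    # Grant the applicant admin rights on the new domain.
+    UserDomainRole.objects.create(user=application.creator, domain=domain, role="ADMIN")
+    # Record the state change (the real model uses an FSMField transition).
+    application.status = "approved"
+    application.save()
+```
+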
+A more complete diagram of the data models, their fields, and their
+relationships is in [models_diagram.md](./models_diagram.md), created with
+the `django-model2puml` plugin.
+
+
+
+<details>
+<summary>PlantUML source code</summary>
+
+To regenerate this image using Docker, run
+
+```bash
+$ docker run -v $(pwd):$(pwd) -w $(pwd) -it plantuml/plantuml -tsvg model_timeline.md
+```
+
+
+```plantuml
+@startuml
+
+allowmixing
+left to right direction
+
+class DomainApplication {
+ Application for a domain
+ --
+ creator (User)
+ investigator (User)
+ authorizing_official (Contact)
+ submitter (Contact)
+ other_contacts (Contacts)
+ requested_domain (Domain)
+ current_websites (Websites)
+ alternative_domains (Websites)
+ --
+ Request information...
+}
+
+class User {
+ Django's user class
+ --
+ ...
+ --
+}
+note left of User
+ Created by DjangoOIDC
+ when users arrive back
+ from Login.gov
+
+ <b>username</b> is the Login UUID
+end note
+
+DomainApplication -l- User : creator, investigator
+
+class Contact {
+ Contact info for a person
+ --
+ first_name
+ middle_name
+ last_name
+ title
+ email
+ phone
+ --
+}
+
+DomainApplication *-r-* Contact : authorizing_official, submitter, other_contacts
+
+class Domain {
+ Approved domain
+ --
+ name
+ is_active
+ --
+ <b>EPP methods</b>
+}
+
+DomainApplication .right[#blue].> Domain : approve()
+
+class DomainInformation {
+ Registrar information on a domain
+ --
+ domain (Domain)
+ domain_application (DomainApplication)
+ security_email
+ --
+ Request information...
+}
+
+DomainInformation -- Domain
+DomainInformation -- DomainApplication
+DomainApplication .[#blue].> DomainInformation : approve()
+
+class UserDomainRole {
+ Permissions
+ --
+ domain (Domain)
+ user (User)
+ role="ADMIN"
+ --
+}
+UserDomainRole -- User
+UserDomainRole -- Domain
+DomainApplication .[#blue].> UserDomainRole : approve()
+
+class DomainInvitation {
+ Email invitations sent
+ --
+ email
+ domain (Domain)
+ status
+ --
+}
+DomainInvitation -- Domain
+DomainInvitation .[#green].> UserDomainRole : User.first_login()
+
+actor applicant #Red
+applicant -d-> DomainApplication : **/register**
+
+actor analyst #Blue
+analyst -[#blue]-> DomainApplication : **approve()**
+
+actor user1 #Green
+user1 -[#green]-> Domain : **/domain/<id>/nameservers**
+actor user2 #Green
+user2 -[#green]-> DomainInformation : **/domain/<id>/?????**
+actor user3 #Green
+user3 -right[#green]-> UserDomainRole : **/domain/<id>/users/add**
+user3 -right[#green]-> DomainInvitation : **/domain/<id>/users/add**
+
+@enduml
+```
+
+</details>
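+
+The green `User.first_login()` edge deserves a note: the diagram suggests that
+when an invited email address logs in for the first time, pending
+`DomainInvitation` rows are converted into `UserDomainRole` rows. A minimal
+sketch of that conversion, assuming "sent"/"retrieved" status values and an
+"ADMIN" role (all assumptions, not confirmed field values):
+
+```python
+from registrar.models import DomainInvitation, UserDomainRole
+
+
+def retrieve_invitations(user):
+    """Sketch: turn pending DomainInvitations into UserDomainRoles on first login."""
+    for invitation in DomainInvitation.objects.filter(email=user.email, status="sent"):
+        UserDomainRole.objects.create(user=user, domain=invitation.domain, role="ADMIN")
+        invitation.status = "retrieved"
+        invitation.save()
+```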
diff --git a/docs/architecture/diagrams/model_timeline.svg b/docs/architecture/diagrams/model_timeline.svg
new file mode 100644
index 000000000..cf2eea238
--- /dev/null
+++ b/docs/architecture/diagrams/model_timeline.svg
@@ -0,0 +1 @@
+<svg xmlns="http://www.w3.org/2000/svg"><!-- Generated PlantUML rendering of the model timeline: DomainApplication, User, Contact, Domain, DomainInformation, UserDomainRole, and DomainInvitation, with applicant/analyst/user actors. Full generated markup omitted here; regenerate from model_timeline.md. --></svg>
\ No newline at end of file
diff --git a/docs/architecture/diagrams/models_diagram.md b/docs/architecture/diagrams/models_diagram.md
new file mode 100644
index 000000000..77fa36707
--- /dev/null
+++ b/docs/architecture/diagrams/models_diagram.md
@@ -0,0 +1,275 @@
+# Complete model documentation
+
+This diagram of our data models is auto-generated with the
+[django-model2puml](https://github.com/sen-den/django-model2puml) library
+using the command
+
+```bash
+$ docker compose run app ./manage.py generate_puml --include registrar
+```
+
+
+
+<details>
+<summary>PlantUML source code</summary>
+
+To regenerate this image using Docker, run
+
+```bash
+$ docker run -v $(pwd):$(pwd) -w $(pwd) -it plantuml/plantuml -tsvg models_diagram.md
+```
+
+```plantuml
+@startuml
+class "registrar.Contact <Registrar>" as registrar.Contact #d6f4e9 {
+ contact
+ --
+ + id (BigAutoField)
+ + created_at (DateTimeField)
+ + updated_at (DateTimeField)
+ ~ user (OneToOneField)
+ + first_name (TextField)
+ + middle_name (TextField)
+ + last_name (TextField)
+ + title (TextField)
+ + email (TextField)
+ + phone (PhoneNumberField)
+ --
+}
+registrar.Contact -- registrar.User
+
+
+class "registrar.DomainApplication <Registrar>" as registrar.DomainApplication #d6f4e9 {
+ domain application
+ --
+ + id (BigAutoField)
+ + created_at (DateTimeField)
+ + updated_at (DateTimeField)
+ + status (FSMField)
+ ~ creator (ForeignKey)
+ ~ investigator (ForeignKey)
+ + organization_type (CharField)
+ + federally_recognized_tribe (BooleanField)
+ + state_recognized_tribe (BooleanField)
+ + tribe_name (TextField)
+ + federal_agency (TextField)
+ + federal_type (CharField)
+ + is_election_board (BooleanField)
+ + organization_name (TextField)
+ + address_line1 (TextField)
+ + address_line2 (CharField)
+ + city (TextField)
+ + state_territory (CharField)
+ + zipcode (CharField)
+ + urbanization (TextField)
+ + type_of_work (TextField)
+ + more_organization_information (TextField)
+ ~ authorizing_official (ForeignKey)
+ ~ requested_domain (OneToOneField)
+ ~ submitter (ForeignKey)
+ + purpose (TextField)
+ + no_other_contacts_rationale (TextField)
+ + anything_else (TextField)
+ + is_policy_acknowledged (BooleanField)
+ # current_websites (ManyToManyField)
+ # alternative_domains (ManyToManyField)
+ # other_contacts (ManyToManyField)
+ --
+}
+registrar.DomainApplication -- registrar.User
+registrar.DomainApplication -- registrar.User
+registrar.DomainApplication -- registrar.Contact
+registrar.DomainApplication -- registrar.Domain
+registrar.DomainApplication -- registrar.Contact
+registrar.DomainApplication *--* registrar.Website
+registrar.DomainApplication *--* registrar.Website
+registrar.DomainApplication *--* registrar.Contact
+
+
+class "registrar.DomainInformation <Registrar>" as registrar.DomainInformation #d6f4e9 {
+ domain information
+ --
+ + id (BigAutoField)
+ + created_at (DateTimeField)
+ + updated_at (DateTimeField)
+ ~ creator (ForeignKey)
+ ~ domain_application (OneToOneField)
+ + organization_type (CharField)
+ + federally_recognized_tribe (BooleanField)
+ + state_recognized_tribe (BooleanField)
+ + tribe_name (TextField)
+ + federal_agency (TextField)
+ + federal_type (CharField)
+ + is_election_board (BooleanField)
+ + organization_name (TextField)
+ + address_line1 (TextField)
+ + address_line2 (CharField)
+ + city (TextField)
+ + state_territory (CharField)
+ + zipcode (CharField)
+ + urbanization (TextField)
+ + type_of_work (TextField)
+ + more_organization_information (TextField)
+ ~ authorizing_official (ForeignKey)
+ ~ domain (OneToOneField)
+ ~ submitter (ForeignKey)
+ + purpose (TextField)
+ + no_other_contacts_rationale (TextField)
+ + anything_else (TextField)
+ + is_policy_acknowledged (BooleanField)
+ + security_email (EmailField)
+ # other_contacts (ManyToManyField)
+ --
+}
+registrar.DomainInformation -- registrar.User
+registrar.DomainInformation -- registrar.DomainApplication
+registrar.DomainInformation -- registrar.Contact
+registrar.DomainInformation -- registrar.Domain
+registrar.DomainInformation -- registrar.Contact
+registrar.DomainInformation *--* registrar.Contact
+
+
+class "registrar.Domain <Registrar>" as registrar.Domain #d6f4e9 {
+ domain
+ --
+ + id (BigAutoField)
+ + created_at (DateTimeField)
+ + updated_at (DateTimeField)
+ + name (CharField)
+ + is_active (FSMField)
+ --
+}
+
+
+class "registrar.HostIP <Registrar>" as registrar.HostIP #d6f4e9 {
+ host ip
+ --
+ + id (BigAutoField)
+ + created_at (DateTimeField)
+ + updated_at (DateTimeField)
+ + address (CharField)
+ ~ host (ForeignKey)
+ --
+}
+registrar.HostIP -- registrar.Host
+
+
+class "registrar.Host <Registrar>" as registrar.Host #d6f4e9 {
+ host
+ --
+ + id (BigAutoField)
+ + created_at (DateTimeField)
+ + updated_at (DateTimeField)
+ + name (CharField)
+ ~ domain (ForeignKey)
+ --
+}
+registrar.Host -- registrar.Domain
+
+
+class "registrar.UserDomainRole <Registrar>" as registrar.UserDomainRole #d6f4e9 {
+ user domain role
+ --
+ + id (BigAutoField)
+ + created_at (DateTimeField)
+ + updated_at (DateTimeField)
+ ~ user (ForeignKey)
+ ~ domain (ForeignKey)
+ + role (TextField)
+ --
+}
+registrar.UserDomainRole -- registrar.User
+registrar.UserDomainRole -- registrar.Domain
+
+
+class "registrar.DomainInvitation <Registrar>" as registrar.DomainInvitation #d6f4e9 {
+ domain invitation
+ --
+ + id (BigAutoField)
+ + created_at (DateTimeField)
+ + updated_at (DateTimeField)
+ + email (EmailField)
+ ~ domain (ForeignKey)
+ + status (FSMField)
+ --
+}
+registrar.DomainInvitation -- registrar.Domain
+
+
+class "registrar.Nameserver <Registrar>" as registrar.Nameserver #d6f4e9 {
+ nameserver
+ --
+ + id (BigAutoField)
+ + created_at (DateTimeField)
+ + updated_at (DateTimeField)
+ + name (CharField)
+ ~ domain (ForeignKey)
+ ~ host_ptr (OneToOneField)
+ --
+}
+registrar.Nameserver -- registrar.Domain
+registrar.Nameserver -- registrar.Host
+
+
+class "registrar.PublicContact <Registrar>" as registrar.PublicContact #d6f4e9 {
+ public contact
+ --
+ + id (BigAutoField)
+ + created_at (DateTimeField)
+ + updated_at (DateTimeField)
+ + contact_type (CharField)
+ + name (TextField)
+ + org (TextField)
+ + street1 (TextField)
+ + street2 (TextField)
+ + street3 (TextField)
+ + city (TextField)
+ + sp (TextField)
+ + pc (TextField)
+ + cc (TextField)
+ + email (TextField)
+ + voice (TextField)
+ + fax (TextField)
+ + pw (TextField)
+ --
+}
+
+
+class "registrar.User <Registrar>" as registrar.User #d6f4e9 {
+ user
+ --
+ + id (BigAutoField)
+ + password (CharField)
+ + last_login (DateTimeField)
+ + is_superuser (BooleanField)
+ + username (CharField)
+ + first_name (CharField)
+ + last_name (CharField)
+ + email (EmailField)
+ + is_staff (BooleanField)
+ + is_active (BooleanField)
+ + date_joined (DateTimeField)
+ + phone (PhoneNumberField)
+ # groups (ManyToManyField)
+ # user_permissions (ManyToManyField)
+ # domains (ManyToManyField)
+ --
+}
+registrar.User *--* registrar.Domain
+
+
+class "registrar.Website <Registrar>" as registrar.Website #d6f4e9 {
+ website
+ --
+ + id (BigAutoField)
+ + created_at (DateTimeField)
+ + updated_at (DateTimeField)
+ + website (CharField)
+ --
+}
+
+
+@enduml
+```
+
+</details>
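+
+One relationship worth spelling out: `registrar.User` carries a
+`# domains (ManyToManyField)` while `registrar.UserDomainRole` holds the
+`user` and `domain` foreign keys plus a `role`, which suggests the
+many-to-many is declared with a through model. A sketch of that declaration
+(inferred from the diagram, not copied from the code):
+
+```python
+from django.contrib.auth.models import AbstractUser
+from django.db import models
+
+
+class User(AbstractUser):
+    """Sketch of registrar.User's `domains` field (inferred)."""
+
+    # UserDomainRole supplies the user/domain foreign keys and the role,
+    # acting as the through table for this many-to-many.
+    domains = models.ManyToManyField(
+        "registrar.Domain",
+        through="registrar.UserDomainRole",
+        related_name="users",  # assumed related_name
+    )
+```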
diff --git a/docs/architecture/diagrams/models_diagram.svg b/docs/architecture/diagrams/models_diagram.svg
new file mode 100644
index 000000000..e0cdd355f
--- /dev/null
+++ b/docs/architecture/diagrams/models_diagram.svg
@@ -0,0 +1 @@
+<svg xmlns="http://www.w3.org/2000/svg"><!-- Generated PlantUML rendering of the complete registrar data model: Contact, User, DomainApplication, DomainInformation, Domain, Host, HostIP, UserDomainRole, DomainInvitation, Nameserver, PublicContact, and Website. Full generated markup omitted here; regenerate from models_diagram.md. --></svg>
textLength="140" x="1049" y="800.4795">creator (ForeignKey)</text><polygon fill="#4177AF" points="1040,809.4297,1036,815.4297,1044,815.4297" style="stroke:#1963A0;stroke-width:1.0;"/><text fill="#000000" font-family="sans-serif" font-size="14" lengthAdjust="spacing" textLength="171" x="1049" y="816.7764">investigator (ForeignKey)</text><ellipse cx="1040" cy="829.7266" fill="#84BE84" rx="3" ry="3" style="stroke:#038048;stroke-width:1.0;"/><text fill="#000000" font-family="sans-serif" font-size="14" lengthAdjust="spacing" textLength="200" x="1049" y="833.0732">organization_type (CharField)</text><ellipse cx="1040" cy="846.0234" fill="#84BE84" rx="3" ry="3" style="stroke:#038048;stroke-width:1.0;"/><text fill="#000000" font-family="sans-serif" font-size="14" lengthAdjust="spacing" textLength="283" x="1049" y="849.3701">federally_recognized_tribe (BooleanField)</text><ellipse cx="1040" cy="862.3203" fill="#84BE84" rx="3" ry="3" style="stroke:#038048;stroke-width:1.0;"/><text fill="#000000" font-family="sans-serif" font-size="14" lengthAdjust="spacing" textLength="261" x="1049" y="865.667">state_recognized_tribe (BooleanField)</text><ellipse cx="1040" cy="878.6172" fill="#84BE84" rx="3" ry="3" style="stroke:#038048;stroke-width:1.0;"/><text fill="#000000" font-family="sans-serif" font-size="14" lengthAdjust="spacing" textLength="153" x="1049" y="881.9639">tribe_name (TextField)</text><ellipse cx="1040" cy="894.9141" fill="#84BE84" rx="3" ry="3" style="stroke:#038048;stroke-width:1.0;"/><text fill="#000000" font-family="sans-serif" font-size="14" lengthAdjust="spacing" textLength="180" x="1049" y="898.2607">federal_agency (TextField)</text><ellipse cx="1040" cy="911.2109" fill="#84BE84" rx="3" ry="3" style="stroke:#038048;stroke-width:1.0;"/><text fill="#000000" font-family="sans-serif" font-size="14" lengthAdjust="spacing" textLength="162" x="1049" y="914.5576">federal_type (CharField)</text><ellipse cx="1040" cy="927.5078" fill="#84BE84" rx="3" ry="3" style="stroke:#038048;stroke-width:1.0;"/><text fill="#000000" font-family="sans-serif" font-size="14" lengthAdjust="spacing" textLength="223" x="1049" y="930.8545">is_election_board (BooleanField)</text><ellipse cx="1040" cy="943.8047" fill="#84BE84" rx="3" ry="3" style="stroke:#038048;stroke-width:1.0;"/><text fill="#000000" font-family="sans-serif" font-size="14" lengthAdjust="spacing" textLength="207" x="1049" y="947.1514">organization_name (TextField)</text><ellipse cx="1040" cy="960.1016" fill="#84BE84" rx="3" ry="3" style="stroke:#038048;stroke-width:1.0;"/><text fill="#000000" font-family="sans-serif" font-size="14" lengthAdjust="spacing" textLength="172" x="1049" y="963.4482">address_line1 (TextField)</text><ellipse cx="1040" cy="976.3984" fill="#84BE84" rx="3" ry="3" style="stroke:#038048;stroke-width:1.0;"/><text fill="#000000" font-family="sans-serif" font-size="14" lengthAdjust="spacing" textLength="174" x="1049" y="979.7451">address_line2 (CharField)</text><ellipse cx="1040" cy="992.6953" fill="#84BE84" rx="3" ry="3" style="stroke:#038048;stroke-width:1.0;"/><text fill="#000000" font-family="sans-serif" font-size="14" lengthAdjust="spacing" textLength="99" x="1049" y="996.042">city (TextField)</text><ellipse cx="1040" cy="1008.9922" fill="#84BE84" rx="3" ry="3" style="stroke:#038048;stroke-width:1.0;"/><text fill="#000000" font-family="sans-serif" font-size="14" lengthAdjust="spacing" textLength="173" x="1049" y="1012.3389">state_territory (CharField)</text><ellipse cx="1040" cy="1025.2891" fill="#84BE84" rx="3" ry="3" 
style="stroke:#038048;stroke-width:1.0;"/><text fill="#000000" font-family="sans-serif" font-size="14" lengthAdjust="spacing" textLength="133" x="1049" y="1028.6357">zipcode (CharField)</text><ellipse cx="1040" cy="1041.5859" fill="#84BE84" rx="3" ry="3" style="stroke:#038048;stroke-width:1.0;"/><text fill="#000000" font-family="sans-serif" font-size="14" lengthAdjust="spacing" textLength="161" x="1049" y="1044.9326">urbanization (TextField)</text><ellipse cx="1040" cy="1057.8828" fill="#84BE84" rx="3" ry="3" style="stroke:#038048;stroke-width:1.0;"/><text fill="#000000" font-family="sans-serif" font-size="14" lengthAdjust="spacing" textLength="166" x="1049" y="1061.2295">type_of_work (TextField)</text><ellipse cx="1040" cy="1074.1797" fill="#84BE84" rx="3" ry="3" style="stroke:#038048;stroke-width:1.0;"/><text fill="#000000" font-family="sans-serif" font-size="14" lengthAdjust="spacing" textLength="288" x="1049" y="1077.5264">more_organization_information (TextField)</text><polygon fill="#4177AF" points="1040,1086.4766,1036,1092.4766,1044,1092.4766" style="stroke:#1963A0;stroke-width:1.0;"/><text fill="#000000" font-family="sans-serif" font-size="14" lengthAdjust="spacing" textLength="217" x="1049" y="1093.8232">authorizing_official (ForeignKey)</text><polygon fill="#4177AF" points="1040,1102.7734,1036,1108.7734,1044,1108.7734" style="stroke:#1963A0;stroke-width:1.0;"/><text fill="#000000" font-family="sans-serif" font-size="14" lengthAdjust="spacing" textLength="252" x="1049" y="1110.1201">requested_domain (OneToOneField)</text><polygon fill="#4177AF" points="1040,1119.0703,1036,1125.0703,1044,1125.0703" style="stroke:#1963A0;stroke-width:1.0;"/><text fill="#000000" font-family="sans-serif" font-size="14" lengthAdjust="spacing" textLength="157" x="1049" y="1126.417">submitter (ForeignKey)</text><ellipse cx="1040" cy="1139.3672" fill="#84BE84" rx="3" ry="3" style="stroke:#038048;stroke-width:1.0;"/><text fill="#000000" font-family="sans-serif" font-size="14" lengthAdjust="spacing" textLength="134" x="1049" y="1142.7139">purpose (TextField)</text><ellipse cx="1040" cy="1155.6641" fill="#84BE84" rx="3" ry="3" style="stroke:#038048;stroke-width:1.0;"/><text fill="#000000" font-family="sans-serif" font-size="14" lengthAdjust="spacing" textLength="271" x="1049" y="1159.0107">no_other_contacts_rationale (TextField)</text><ellipse cx="1040" cy="1171.9609" fill="#84BE84" rx="3" ry="3" style="stroke:#038048;stroke-width:1.0;"/><text fill="#000000" font-family="sans-serif" font-size="14" lengthAdjust="spacing" textLength="171" x="1049" y="1175.3076">anything_else (TextField)</text><ellipse cx="1040" cy="1188.2578" fill="#84BE84" rx="3" ry="3" style="stroke:#038048;stroke-width:1.0;"/><text fill="#000000" font-family="sans-serif" font-size="14" lengthAdjust="spacing" textLength="268" x="1049" y="1191.6045">is_policy_acknowledged (BooleanField)</text><polygon fill="#FFFF44" points="1040,1199.5547,1044,1203.5547,1040,1207.5547,1036,1203.5547" style="stroke:#B38D22;stroke-width:1.0;"/><text fill="#000000" font-family="sans-serif" font-size="14" lengthAdjust="spacing" textLength="255" x="1049" y="1207.9014">current_websites (ManyToManyField)</text><polygon fill="#FFFF44" points="1040,1215.8516,1044,1219.8516,1040,1223.8516,1036,1219.8516" style="stroke:#B38D22;stroke-width:1.0;"/><text fill="#000000" font-family="sans-serif" font-size="14" lengthAdjust="spacing" textLength="273" x="1049" y="1224.1982">alternative_domains (ManyToManyField)</text><polygon fill="#FFFF44" 
points="1040,1232.1484,1044,1236.1484,1040,1240.1484,1036,1236.1484" style="stroke:#B38D22;stroke-width:1.0;"/><text fill="#000000" font-family="sans-serif" font-size="14" lengthAdjust="spacing" textLength="240" x="1049" y="1240.4951">other_contacts (ManyToManyField)</text><line style="stroke:#181818;stroke-width:1.0;" x1="1030" x2="1342" y1="1247.7969" y2="1247.7969"/></g><!--class Domain--><g id="elem_Domain"><rect codeLine="131" fill="#D6F4E9" height="153.7813" id="Domain" rx="2.5" ry="2.5" style="stroke:#181818;stroke-width:0.5;" width="217" x="717.5" y="1988"/><ellipse cx="735.65" cy="2004" fill="#ADD1B2" rx="11" ry="11" style="stroke:#181818;stroke-width:1.0;"/><path d="M738.6188,2009.6406 Q738.0406,2009.9375 737.4,2010.0781 Q736.7594,2010.2344 736.0563,2010.2344 Q733.5563,2010.2344 732.2281,2008.5938 Q730.9156,2006.9375 730.9156,2003.8125 Q730.9156,2000.6875 732.2281,1999.0313 Q733.5563,1997.375 736.0563,1997.375 Q736.7594,1997.375 737.4,1997.5313 Q738.0563,1997.6875 738.6188,1997.9844 L738.6188,2000.7031 Q737.9938,2000.125 737.4,1999.8594 Q736.8063,1999.5781 736.1813,1999.5781 Q734.8375,1999.5781 734.15,2000.6563 Q733.4625,2001.7188 733.4625,2003.8125 Q733.4625,2005.9063 734.15,2006.9844 Q734.8375,2008.0469 736.1813,2008.0469 Q736.8063,2008.0469 737.4,2007.7813 Q737.9938,2007.5 738.6188,2006.9219 L738.6188,2009.6406 Z " fill="#000000"/><text fill="#000000" font-family="sans-serif" font-size="14" lengthAdjust="spacing" textLength="114" x="750.35" y="2008.8467">registrar.Domain</text><rect fill="#FFFFFF" height="15.9688" style="stroke:#181818;stroke-width:1.0;stroke-dasharray:2.0,2.0;" width="58" x="879.5" y="1985"/><text fill="#000000" font-family="sans-serif" font-size="12" font-style="italic" lengthAdjust="spacing" textLength="56" x="880.5" y="1997.1387">Registrar</text><line style="stroke:#181818;stroke-width:0.5;" x1="718.5" x2="933.5" y1="2020" y2="2020"/><text fill="#000000" font-family="sans-serif" font-size="14" lengthAdjust="spacing" textLength="51" x="723.5" y="2036.9951">domain</text><line style="stroke:#181818;stroke-width:1.0;" x1="718.5" x2="933.5" y1="2044.2969" y2="2044.2969"/><ellipse cx="728.5" cy="2057.9453" fill="#84BE84" rx="3" ry="3" style="stroke:#038048;stroke-width:1.0;"/><text fill="#000000" font-family="sans-serif" font-size="14" lengthAdjust="spacing" textLength="112" x="737.5" y="2061.292">id (BigAutoField)</text><ellipse cx="728.5" cy="2074.2422" fill="#84BE84" rx="3" ry="3" style="stroke:#038048;stroke-width:1.0;"/><text fill="#000000" font-family="sans-serif" font-size="14" lengthAdjust="spacing" textLength="186" x="737.5" y="2077.5889">created_at (DateTimeField)</text><ellipse cx="728.5" cy="2090.5391" fill="#84BE84" rx="3" ry="3" style="stroke:#038048;stroke-width:1.0;"/><text fill="#000000" font-family="sans-serif" font-size="14" lengthAdjust="spacing" textLength="191" x="737.5" y="2093.8857">updated_at (DateTimeField)</text><ellipse cx="728.5" cy="2106.8359" fill="#84BE84" rx="3" ry="3" style="stroke:#038048;stroke-width:1.0;"/><text fill="#000000" font-family="sans-serif" font-size="14" lengthAdjust="spacing" textLength="117" x="737.5" y="2110.1826">name (CharField)</text><ellipse cx="728.5" cy="2123.1328" fill="#84BE84" rx="3" ry="3" style="stroke:#038048;stroke-width:1.0;"/><text fill="#000000" font-family="sans-serif" font-size="14" lengthAdjust="spacing" textLength="133" x="737.5" y="2126.4795">is_active (FSMField)</text><line style="stroke:#181818;stroke-width:1.0;" x1="718.5" x2="933.5" y1="2133.7813" y2="2133.7813"/></g><!--class 
Website--><g id="elem_Website"><rect codeLine="260" fill="#D6F4E9" height="137.4844" id="Website" rx="2.5" ry="2.5" style="stroke:#181818;stroke-width:0.5;" width="217" x="1222.5" y="1365"/><ellipse cx="1239.3" cy="1381" fill="#ADD1B2" rx="11" ry="11" style="stroke:#181818;stroke-width:1.0;"/><path d="M1242.2688,1386.6406 Q1241.6906,1386.9375 1241.05,1387.0781 Q1240.4094,1387.2344 1239.7063,1387.2344 Q1237.2063,1387.2344 1235.8781,1385.5938 Q1234.5656,1383.9375 1234.5656,1380.8125 Q1234.5656,1377.6875 1235.8781,1376.0313 Q1237.2063,1374.375 1239.7063,1374.375 Q1240.4094,1374.375 1241.05,1374.5313 Q1241.7063,1374.6875 1242.2688,1374.9844 L1242.2688,1377.7031 Q1241.6438,1377.125 1241.05,1376.8594 Q1240.4563,1376.5781 1239.8313,1376.5781 Q1238.4875,1376.5781 1237.8,1377.6563 Q1237.1125,1378.7188 1237.1125,1380.8125 Q1237.1125,1382.9063 1237.8,1383.9844 Q1238.4875,1385.0469 1239.8313,1385.0469 Q1240.4563,1385.0469 1241.05,1384.7813 Q1241.6438,1384.5 1242.2688,1383.9219 L1242.2688,1386.6406 Z " fill="#000000"/><text fill="#000000" font-family="sans-serif" font-size="14" lengthAdjust="spacing" textLength="117" x="1253.7" y="1385.8467">registrar.Website</text><rect fill="#FFFFFF" height="15.9688" style="stroke:#181818;stroke-width:1.0;stroke-dasharray:2.0,2.0;" width="58" x="1384.5" y="1362"/><text fill="#000000" font-family="sans-serif" font-size="12" font-style="italic" lengthAdjust="spacing" textLength="56" x="1385.5" y="1374.1387">Registrar</text><line style="stroke:#181818;stroke-width:0.5;" x1="1223.5" x2="1438.5" y1="1397" y2="1397"/><text fill="#000000" font-family="sans-serif" font-size="14" lengthAdjust="spacing" textLength="54" x="1228.5" y="1413.9951">website</text><line style="stroke:#181818;stroke-width:1.0;" x1="1223.5" x2="1438.5" y1="1421.2969" y2="1421.2969"/><ellipse cx="1233.5" cy="1434.9453" fill="#84BE84" rx="3" ry="3" style="stroke:#038048;stroke-width:1.0;"/><text fill="#000000" font-family="sans-serif" font-size="14" lengthAdjust="spacing" textLength="112" x="1242.5" y="1438.292">id (BigAutoField)</text><ellipse cx="1233.5" cy="1451.2422" fill="#84BE84" rx="3" ry="3" style="stroke:#038048;stroke-width:1.0;"/><text fill="#000000" font-family="sans-serif" font-size="14" lengthAdjust="spacing" textLength="186" x="1242.5" y="1454.5889">created_at (DateTimeField)</text><ellipse cx="1233.5" cy="1467.5391" fill="#84BE84" rx="3" ry="3" style="stroke:#038048;stroke-width:1.0;"/><text fill="#000000" font-family="sans-serif" font-size="14" lengthAdjust="spacing" textLength="191" x="1242.5" y="1470.8857">updated_at (DateTimeField)</text><ellipse cx="1233.5" cy="1483.8359" fill="#84BE84" rx="3" ry="3" style="stroke:#038048;stroke-width:1.0;"/><text fill="#000000" font-family="sans-serif" font-size="14" lengthAdjust="spacing" textLength="132" x="1242.5" y="1487.1826">website (CharField)</text><line style="stroke:#181818;stroke-width:1.0;" x1="1223.5" x2="1438.5" y1="1494.4844" y2="1494.4844"/></g><!--class DomainInformation--><g id="elem_DomainInformation"><rect codeLine="88" fill="#D6F4E9" height="561.2031" id="DomainInformation" rx="2.5" ry="2.5" style="stroke:#181818;stroke-width:0.5;" width="314" x="836" y="41"/><ellipse cx="863.15" cy="57" fill="#ADD1B2" rx="11" ry="11" style="stroke:#181818;stroke-width:1.0;"/><path d="M866.1188,62.6406 Q865.5406,62.9375 864.9,63.0781 Q864.2594,63.2344 863.5563,63.2344 Q861.0563,63.2344 859.7281,61.5938 Q858.4156,59.9375 858.4156,56.8125 Q858.4156,53.6875 859.7281,52.0313 Q861.0563,50.375 863.5563,50.375 Q864.2594,50.375 864.9,50.5313 
Q865.5563,50.6875 866.1188,50.9844 L866.1188,53.7031 Q865.4938,53.125 864.9,52.8594 Q864.3063,52.5781 863.6813,52.5781 Q862.3375,52.5781 861.65,53.6563 Q860.9625,54.7188 860.9625,56.8125 Q860.9625,58.9063 861.65,59.9844 Q862.3375,61.0469 863.6813,61.0469 Q864.3063,61.0469 864.9,60.7813 Q865.4938,60.5 866.1188,59.9219 L866.1188,62.6406 Z " fill="#000000"/><text fill="#000000" font-family="sans-serif" font-size="14" lengthAdjust="spacing" textLength="191" x="879.85" y="61.8467">registrar.DomainInformation</text><rect fill="#FFFFFF" height="15.9688" style="stroke:#181818;stroke-width:1.0;stroke-dasharray:2.0,2.0;" width="58" x="1095" y="38"/><text fill="#000000" font-family="sans-serif" font-size="12" font-style="italic" lengthAdjust="spacing" textLength="56" x="1096" y="50.1387">Registrar</text><line style="stroke:#181818;stroke-width:0.5;" x1="837" x2="1149" y1="73" y2="73"/><text fill="#000000" font-family="sans-serif" font-size="14" lengthAdjust="spacing" textLength="132" x="842" y="89.9951">domain information</text><line style="stroke:#181818;stroke-width:1.0;" x1="837" x2="1149" y1="97.2969" y2="97.2969"/><ellipse cx="847" cy="110.9453" fill="#84BE84" rx="3" ry="3" style="stroke:#038048;stroke-width:1.0;"/><text fill="#000000" font-family="sans-serif" font-size="14" lengthAdjust="spacing" textLength="112" x="856" y="114.292">id (BigAutoField)</text><ellipse cx="847" cy="127.2422" fill="#84BE84" rx="3" ry="3" style="stroke:#038048;stroke-width:1.0;"/><text fill="#000000" font-family="sans-serif" font-size="14" lengthAdjust="spacing" textLength="186" x="856" y="130.5889">created_at (DateTimeField)</text><ellipse cx="847" cy="143.5391" fill="#84BE84" rx="3" ry="3" style="stroke:#038048;stroke-width:1.0;"/><text fill="#000000" font-family="sans-serif" font-size="14" lengthAdjust="spacing" textLength="191" x="856" y="146.8857">updated_at (DateTimeField)</text><polygon fill="#4177AF" points="847,155.8359,843,161.8359,851,161.8359" style="stroke:#1963A0;stroke-width:1.0;"/><text fill="#000000" font-family="sans-serif" font-size="14" lengthAdjust="spacing" textLength="140" x="856" y="163.1826">creator (ForeignKey)</text><polygon fill="#4177AF" points="847,172.1328,843,178.1328,851,178.1328" style="stroke:#1963A0;stroke-width:1.0;"/><text fill="#000000" font-family="sans-serif" font-size="14" lengthAdjust="spacing" textLength="254" x="856" y="179.4795">domain_application (OneToOneField)</text><ellipse cx="847" cy="192.4297" fill="#84BE84" rx="3" ry="3" style="stroke:#038048;stroke-width:1.0;"/><text fill="#000000" font-family="sans-serif" font-size="14" lengthAdjust="spacing" textLength="200" x="856" y="195.7764">organization_type (CharField)</text><ellipse cx="847" cy="208.7266" fill="#84BE84" rx="3" ry="3" style="stroke:#038048;stroke-width:1.0;"/><text fill="#000000" font-family="sans-serif" font-size="14" lengthAdjust="spacing" textLength="283" x="856" y="212.0732">federally_recognized_tribe (BooleanField)</text><ellipse cx="847" cy="225.0234" fill="#84BE84" rx="3" ry="3" style="stroke:#038048;stroke-width:1.0;"/><text fill="#000000" font-family="sans-serif" font-size="14" lengthAdjust="spacing" textLength="261" x="856" y="228.3701">state_recognized_tribe (BooleanField)</text><ellipse cx="847" cy="241.3203" fill="#84BE84" rx="3" ry="3" style="stroke:#038048;stroke-width:1.0;"/><text fill="#000000" font-family="sans-serif" font-size="14" lengthAdjust="spacing" textLength="153" x="856" y="244.667">tribe_name (TextField)</text><ellipse cx="847" cy="257.6172" fill="#84BE84" rx="3" ry="3" 
style="stroke:#038048;stroke-width:1.0;"/><text fill="#000000" font-family="sans-serif" font-size="14" lengthAdjust="spacing" textLength="180" x="856" y="260.9639">federal_agency (TextField)</text><ellipse cx="847" cy="273.9141" fill="#84BE84" rx="3" ry="3" style="stroke:#038048;stroke-width:1.0;"/><text fill="#000000" font-family="sans-serif" font-size="14" lengthAdjust="spacing" textLength="162" x="856" y="277.2607">federal_type (CharField)</text><ellipse cx="847" cy="290.2109" fill="#84BE84" rx="3" ry="3" style="stroke:#038048;stroke-width:1.0;"/><text fill="#000000" font-family="sans-serif" font-size="14" lengthAdjust="spacing" textLength="223" x="856" y="293.5576">is_election_board (BooleanField)</text><ellipse cx="847" cy="306.5078" fill="#84BE84" rx="3" ry="3" style="stroke:#038048;stroke-width:1.0;"/><text fill="#000000" font-family="sans-serif" font-size="14" lengthAdjust="spacing" textLength="207" x="856" y="309.8545">organization_name (TextField)</text><ellipse cx="847" cy="322.8047" fill="#84BE84" rx="3" ry="3" style="stroke:#038048;stroke-width:1.0;"/><text fill="#000000" font-family="sans-serif" font-size="14" lengthAdjust="spacing" textLength="172" x="856" y="326.1514">address_line1 (TextField)</text><ellipse cx="847" cy="339.1016" fill="#84BE84" rx="3" ry="3" style="stroke:#038048;stroke-width:1.0;"/><text fill="#000000" font-family="sans-serif" font-size="14" lengthAdjust="spacing" textLength="174" x="856" y="342.4482">address_line2 (CharField)</text><ellipse cx="847" cy="355.3984" fill="#84BE84" rx="3" ry="3" style="stroke:#038048;stroke-width:1.0;"/><text fill="#000000" font-family="sans-serif" font-size="14" lengthAdjust="spacing" textLength="99" x="856" y="358.7451">city (TextField)</text><ellipse cx="847" cy="371.6953" fill="#84BE84" rx="3" ry="3" style="stroke:#038048;stroke-width:1.0;"/><text fill="#000000" font-family="sans-serif" font-size="14" lengthAdjust="spacing" textLength="173" x="856" y="375.042">state_territory (CharField)</text><ellipse cx="847" cy="387.9922" fill="#84BE84" rx="3" ry="3" style="stroke:#038048;stroke-width:1.0;"/><text fill="#000000" font-family="sans-serif" font-size="14" lengthAdjust="spacing" textLength="133" x="856" y="391.3389">zipcode (CharField)</text><ellipse cx="847" cy="404.2891" fill="#84BE84" rx="3" ry="3" style="stroke:#038048;stroke-width:1.0;"/><text fill="#000000" font-family="sans-serif" font-size="14" lengthAdjust="spacing" textLength="161" x="856" y="407.6357">urbanization (TextField)</text><ellipse cx="847" cy="420.5859" fill="#84BE84" rx="3" ry="3" style="stroke:#038048;stroke-width:1.0;"/><text fill="#000000" font-family="sans-serif" font-size="14" lengthAdjust="spacing" textLength="166" x="856" y="423.9326">type_of_work (TextField)</text><ellipse cx="847" cy="436.8828" fill="#84BE84" rx="3" ry="3" style="stroke:#038048;stroke-width:1.0;"/><text fill="#000000" font-family="sans-serif" font-size="14" lengthAdjust="spacing" textLength="288" x="856" y="440.2295">more_organization_information (TextField)</text><polygon fill="#4177AF" points="847,449.1797,843,455.1797,851,455.1797" style="stroke:#1963A0;stroke-width:1.0;"/><text fill="#000000" font-family="sans-serif" font-size="14" lengthAdjust="spacing" textLength="217" x="856" y="456.5264">authorizing_official (ForeignKey)</text><polygon fill="#4177AF" points="847,465.4766,843,471.4766,851,471.4766" style="stroke:#1963A0;stroke-width:1.0;"/><text fill="#000000" font-family="sans-serif" font-size="14" lengthAdjust="spacing" textLength="173" x="856" y="472.8232">domain 
(OneToOneField)</text><polygon fill="#4177AF" points="847,481.7734,843,487.7734,851,487.7734" style="stroke:#1963A0;stroke-width:1.0;"/><text fill="#000000" font-family="sans-serif" font-size="14" lengthAdjust="spacing" textLength="157" x="856" y="489.1201">submitter (ForeignKey)</text><ellipse cx="847" cy="502.0703" fill="#84BE84" rx="3" ry="3" style="stroke:#038048;stroke-width:1.0;"/><text fill="#000000" font-family="sans-serif" font-size="14" lengthAdjust="spacing" textLength="134" x="856" y="505.417">purpose (TextField)</text><ellipse cx="847" cy="518.3672" fill="#84BE84" rx="3" ry="3" style="stroke:#038048;stroke-width:1.0;"/><text fill="#000000" font-family="sans-serif" font-size="14" lengthAdjust="spacing" textLength="271" x="856" y="521.7139">no_other_contacts_rationale (TextField)</text><ellipse cx="847" cy="534.6641" fill="#84BE84" rx="3" ry="3" style="stroke:#038048;stroke-width:1.0;"/><text fill="#000000" font-family="sans-serif" font-size="14" lengthAdjust="spacing" textLength="171" x="856" y="538.0107">anything_else (TextField)</text><ellipse cx="847" cy="550.9609" fill="#84BE84" rx="3" ry="3" style="stroke:#038048;stroke-width:1.0;"/><text fill="#000000" font-family="sans-serif" font-size="14" lengthAdjust="spacing" textLength="268" x="856" y="554.3076">is_policy_acknowledged (BooleanField)</text><ellipse cx="847" cy="567.2578" fill="#84BE84" rx="3" ry="3" style="stroke:#038048;stroke-width:1.0;"/><text fill="#000000" font-family="sans-serif" font-size="14" lengthAdjust="spacing" textLength="179" x="856" y="570.6045">security_email (EmailField)</text><polygon fill="#FFFF44" points="847,578.5547,851,582.5547,847,586.5547,843,582.5547" style="stroke:#B38D22;stroke-width:1.0;"/><text fill="#000000" font-family="sans-serif" font-size="14" lengthAdjust="spacing" textLength="240" x="856" y="586.9014">other_contacts (ManyToManyField)</text><line style="stroke:#181818;stroke-width:1.0;" x1="837" x2="1149" y1="594.2031" y2="594.2031"/></g><!--class HostIP--><g id="elem_HostIP"><rect codeLine="143" fill="#D6F4E9" height="153.7813" id="HostIP" rx="2.5" ry="2.5" style="stroke:#181818;stroke-width:0.5;" width="217" x="297.5" y="1356.5"/><ellipse cx="319.7" cy="1372.5" fill="#ADD1B2" rx="11" ry="11" style="stroke:#181818;stroke-width:1.0;"/><path d="M322.6688,1378.1406 Q322.0906,1378.4375 321.45,1378.5781 Q320.8094,1378.7344 320.1063,1378.7344 Q317.6063,1378.7344 316.2781,1377.0938 Q314.9656,1375.4375 314.9656,1372.3125 Q314.9656,1369.1875 316.2781,1367.5313 Q317.6063,1365.875 320.1063,1365.875 Q320.8094,1365.875 321.45,1366.0313 Q322.1063,1366.1875 322.6688,1366.4844 L322.6688,1369.2031 Q322.0438,1368.625 321.45,1368.3594 Q320.8563,1368.0781 320.2313,1368.0781 Q318.8875,1368.0781 318.2,1369.1563 Q317.5125,1370.2188 317.5125,1372.3125 Q317.5125,1374.4063 318.2,1375.4844 Q318.8875,1376.5469 320.2313,1376.5469 Q320.8563,1376.5469 321.45,1376.2813 Q322.0438,1376 322.6688,1375.4219 L322.6688,1378.1406 Z " fill="#000000"/><text fill="#000000" font-family="sans-serif" font-size="14" lengthAdjust="spacing" textLength="105" x="335.3" y="1377.3467">registrar.HostIP</text><rect fill="#FFFFFF" height="15.9688" style="stroke:#181818;stroke-width:1.0;stroke-dasharray:2.0,2.0;" width="58" x="459.5" y="1353.5"/><text fill="#000000" font-family="sans-serif" font-size="12" font-style="italic" lengthAdjust="spacing" textLength="56" x="460.5" y="1365.6387">Registrar</text><line style="stroke:#181818;stroke-width:0.5;" x1="298.5" x2="513.5" y1="1388.5" y2="1388.5"/><text fill="#000000" 
font-family="sans-serif" font-size="14" lengthAdjust="spacing" textLength="47" x="303.5" y="1405.4951">host ip</text><line style="stroke:#181818;stroke-width:1.0;" x1="298.5" x2="513.5" y1="1412.7969" y2="1412.7969"/><ellipse cx="308.5" cy="1426.4453" fill="#84BE84" rx="3" ry="3" style="stroke:#038048;stroke-width:1.0;"/><text fill="#000000" font-family="sans-serif" font-size="14" lengthAdjust="spacing" textLength="112" x="317.5" y="1429.792">id (BigAutoField)</text><ellipse cx="308.5" cy="1442.7422" fill="#84BE84" rx="3" ry="3" style="stroke:#038048;stroke-width:1.0;"/><text fill="#000000" font-family="sans-serif" font-size="14" lengthAdjust="spacing" textLength="186" x="317.5" y="1446.0889">created_at (DateTimeField)</text><ellipse cx="308.5" cy="1459.0391" fill="#84BE84" rx="3" ry="3" style="stroke:#038048;stroke-width:1.0;"/><text fill="#000000" font-family="sans-serif" font-size="14" lengthAdjust="spacing" textLength="191" x="317.5" y="1462.3857">updated_at (DateTimeField)</text><ellipse cx="308.5" cy="1475.3359" fill="#84BE84" rx="3" ry="3" style="stroke:#038048;stroke-width:1.0;"/><text fill="#000000" font-family="sans-serif" font-size="14" lengthAdjust="spacing" textLength="134" x="317.5" y="1478.6826">address (CharField)</text><polygon fill="#4177AF" points="308.5,1487.6328,304.5,1493.6328,312.5,1493.6328" style="stroke:#1963A0;stroke-width:1.0;"/><text fill="#000000" font-family="sans-serif" font-size="14" lengthAdjust="spacing" textLength="122" x="317.5" y="1494.9795">host (ForeignKey)</text><line style="stroke:#181818;stroke-width:1.0;" x1="298.5" x2="513.5" y1="1502.2813" y2="1502.2813"/></g><!--class Host--><g id="elem_Host"><rect codeLine="156" fill="#D6F4E9" height="153.7813" id="Host" rx="2.5" ry="2.5" style="stroke:#181818;stroke-width:0.5;" width="217" x="266.5" y="1692.5"/><ellipse cx="294.1" cy="1708.5" fill="#ADD1B2" rx="11" ry="11" style="stroke:#181818;stroke-width:1.0;"/><path d="M297.0688,1714.1406 Q296.4906,1714.4375 295.85,1714.5781 Q295.2094,1714.7344 294.5063,1714.7344 Q292.0063,1714.7344 290.6781,1713.0938 Q289.3656,1711.4375 289.3656,1708.3125 Q289.3656,1705.1875 290.6781,1703.5313 Q292.0063,1701.875 294.5063,1701.875 Q295.2094,1701.875 295.85,1702.0313 Q296.5063,1702.1875 297.0688,1702.4844 L297.0688,1705.2031 Q296.4438,1704.625 295.85,1704.3594 Q295.2563,1704.0781 294.6313,1704.0781 Q293.2875,1704.0781 292.6,1705.1563 Q291.9125,1706.2188 291.9125,1708.3125 Q291.9125,1710.4063 292.6,1711.4844 Q293.2875,1712.5469 294.6313,1712.5469 Q295.2563,1712.5469 295.85,1712.2813 Q296.4438,1712 297.0688,1711.4219 L297.0688,1714.1406 Z " fill="#000000"/><text fill="#000000" font-family="sans-serif" font-size="14" lengthAdjust="spacing" textLength="93" x="310.9" y="1713.3467">registrar.Host</text><rect fill="#FFFFFF" height="15.9688" style="stroke:#181818;stroke-width:1.0;stroke-dasharray:2.0,2.0;" width="58" x="428.5" y="1689.5"/><text fill="#000000" font-family="sans-serif" font-size="12" font-style="italic" lengthAdjust="spacing" textLength="56" x="429.5" y="1701.6387">Registrar</text><line style="stroke:#181818;stroke-width:0.5;" x1="267.5" x2="482.5" y1="1724.5" y2="1724.5"/><text fill="#000000" font-family="sans-serif" font-size="14" lengthAdjust="spacing" textLength="31" x="272.5" y="1741.4951">host</text><line style="stroke:#181818;stroke-width:1.0;" x1="267.5" x2="482.5" y1="1748.7969" y2="1748.7969"/><ellipse cx="277.5" cy="1762.4453" fill="#84BE84" rx="3" ry="3" style="stroke:#038048;stroke-width:1.0;"/><text fill="#000000" font-family="sans-serif" 
font-size="14" lengthAdjust="spacing" textLength="112" x="286.5" y="1765.792">id (BigAutoField)</text><ellipse cx="277.5" cy="1778.7422" fill="#84BE84" rx="3" ry="3" style="stroke:#038048;stroke-width:1.0;"/><text fill="#000000" font-family="sans-serif" font-size="14" lengthAdjust="spacing" textLength="186" x="286.5" y="1782.0889">created_at (DateTimeField)</text><ellipse cx="277.5" cy="1795.0391" fill="#84BE84" rx="3" ry="3" style="stroke:#038048;stroke-width:1.0;"/><text fill="#000000" font-family="sans-serif" font-size="14" lengthAdjust="spacing" textLength="191" x="286.5" y="1798.3857">updated_at (DateTimeField)</text><ellipse cx="277.5" cy="1811.3359" fill="#84BE84" rx="3" ry="3" style="stroke:#038048;stroke-width:1.0;"/><text fill="#000000" font-family="sans-serif" font-size="14" lengthAdjust="spacing" textLength="117" x="286.5" y="1814.6826">name (CharField)</text><polygon fill="#4177AF" points="277.5,1823.6328,273.5,1829.6328,281.5,1829.6328" style="stroke:#1963A0;stroke-width:1.0;"/><text fill="#000000" font-family="sans-serif" font-size="14" lengthAdjust="spacing" textLength="142" x="286.5" y="1830.9795">domain (ForeignKey)</text><line style="stroke:#181818;stroke-width:1.0;" x1="267.5" x2="482.5" y1="1838.2813" y2="1838.2813"/></g><!--class UserDomainRole--><g id="elem_UserDomainRole"><rect codeLine="169" fill="#D6F4E9" height="170.0781" id="UserDomainRole" rx="2.5" ry="2.5" style="stroke:#181818;stroke-width:0.5;" width="273" x="549.5" y="1348.5"/><ellipse cx="564.5" cy="1364.5" fill="#ADD1B2" rx="11" ry="11" style="stroke:#181818;stroke-width:1.0;"/><path d="M567.4688,1370.1406 Q566.8906,1370.4375 566.25,1370.5781 Q565.6094,1370.7344 564.9063,1370.7344 Q562.4063,1370.7344 561.0781,1369.0938 Q559.7656,1367.4375 559.7656,1364.3125 Q559.7656,1361.1875 561.0781,1359.5313 Q562.4063,1357.875 564.9063,1357.875 Q565.6094,1357.875 566.25,1358.0313 Q566.9063,1358.1875 567.4688,1358.4844 L567.4688,1361.2031 Q566.8438,1360.625 566.25,1360.3594 Q565.6563,1360.0781 565.0313,1360.0781 Q563.6875,1360.0781 563,1361.1563 Q562.3125,1362.2188 562.3125,1364.3125 Q562.3125,1366.4063 563,1367.4844 Q563.6875,1368.5469 565.0313,1368.5469 Q565.6563,1368.5469 566.25,1368.2813 Q566.8438,1368 567.4688,1367.4219 L567.4688,1370.1406 Z " fill="#000000"/><text fill="#000000" font-family="sans-serif" font-size="14" lengthAdjust="spacing" textLength="177" x="578.5" y="1369.3467">registrar.UserDomainRole</text><rect fill="#FFFFFF" height="15.9688" style="stroke:#181818;stroke-width:1.0;stroke-dasharray:2.0,2.0;" width="58" x="767.5" y="1345.5"/><text fill="#000000" font-family="sans-serif" font-size="12" font-style="italic" lengthAdjust="spacing" textLength="56" x="768.5" y="1357.6387">Registrar</text><line style="stroke:#181818;stroke-width:0.5;" x1="550.5" x2="821.5" y1="1380.5" y2="1380.5"/><text fill="#000000" font-family="sans-serif" font-size="14" lengthAdjust="spacing" textLength="116" x="555.5" y="1397.4951">user domain role</text><line style="stroke:#181818;stroke-width:1.0;" x1="550.5" x2="821.5" y1="1404.7969" y2="1404.7969"/><ellipse cx="560.5" cy="1418.4453" fill="#84BE84" rx="3" ry="3" style="stroke:#038048;stroke-width:1.0;"/><text fill="#000000" font-family="sans-serif" font-size="14" lengthAdjust="spacing" textLength="112" x="569.5" y="1421.792">id (BigAutoField)</text><ellipse cx="560.5" cy="1434.7422" fill="#84BE84" rx="3" ry="3" style="stroke:#038048;stroke-width:1.0;"/><text fill="#000000" font-family="sans-serif" font-size="14" lengthAdjust="spacing" textLength="186" x="569.5" 
y="1438.0889">created_at (DateTimeField)</text><ellipse cx="560.5" cy="1451.0391" fill="#84BE84" rx="3" ry="3" style="stroke:#038048;stroke-width:1.0;"/><text fill="#000000" font-family="sans-serif" font-size="14" lengthAdjust="spacing" textLength="191" x="569.5" y="1454.3857">updated_at (DateTimeField)</text><polygon fill="#4177AF" points="560.5,1463.3359,556.5,1469.3359,564.5,1469.3359" style="stroke:#1963A0;stroke-width:1.0;"/><text fill="#000000" font-family="sans-serif" font-size="14" lengthAdjust="spacing" textLength="122" x="569.5" y="1470.6826">user (ForeignKey)</text><polygon fill="#4177AF" points="560.5,1479.6328,556.5,1485.6328,564.5,1485.6328" style="stroke:#1963A0;stroke-width:1.0;"/><text fill="#000000" font-family="sans-serif" font-size="14" lengthAdjust="spacing" textLength="142" x="569.5" y="1486.9795">domain (ForeignKey)</text><ellipse cx="560.5" cy="1499.9297" fill="#84BE84" rx="3" ry="3" style="stroke:#038048;stroke-width:1.0;"/><text fill="#000000" font-family="sans-serif" font-size="14" lengthAdjust="spacing" textLength="102" x="569.5" y="1503.2764">role (TextField)</text><line style="stroke:#181818;stroke-width:1.0;" x1="550.5" x2="821.5" y1="1510.5781" y2="1510.5781"/></g><!--class DomainInvitation--><g id="elem_DomainInvitation"><rect codeLine="184" fill="#D6F4E9" height="170.0781" id="DomainInvitation" rx="2.5" ry="2.5" style="stroke:#181818;stroke-width:0.5;" width="271" x="518.5" y="1684.5"/><ellipse cx="533.5" cy="1700.5" fill="#ADD1B2" rx="11" ry="11" style="stroke:#181818;stroke-width:1.0;"/><path d="M536.4688,1706.1406 Q535.8906,1706.4375 535.25,1706.5781 Q534.6094,1706.7344 533.9063,1706.7344 Q531.4063,1706.7344 530.0781,1705.0938 Q528.7656,1703.4375 528.7656,1700.3125 Q528.7656,1697.1875 530.0781,1695.5313 Q531.4063,1693.875 533.9063,1693.875 Q534.6094,1693.875 535.25,1694.0313 Q535.9063,1694.1875 536.4688,1694.4844 L536.4688,1697.2031 Q535.8438,1696.625 535.25,1696.3594 Q534.6563,1696.0781 534.0313,1696.0781 Q532.6875,1696.0781 532,1697.1563 Q531.3125,1698.2188 531.3125,1700.3125 Q531.3125,1702.4063 532,1703.4844 Q532.6875,1704.5469 534.0313,1704.5469 Q534.6563,1704.5469 535.25,1704.2813 Q535.8438,1704 536.4688,1703.4219 L536.4688,1706.1406 Z " fill="#000000"/><text fill="#000000" font-family="sans-serif" font-size="14" lengthAdjust="spacing" textLength="175" x="547.5" y="1705.3467">registrar.DomainInvitation</text><rect fill="#FFFFFF" height="15.9688" style="stroke:#181818;stroke-width:1.0;stroke-dasharray:2.0,2.0;" width="58" x="734.5" y="1681.5"/><text fill="#000000" font-family="sans-serif" font-size="12" font-style="italic" lengthAdjust="spacing" textLength="56" x="735.5" y="1693.6387">Registrar</text><line style="stroke:#181818;stroke-width:0.5;" x1="519.5" x2="788.5" y1="1716.5" y2="1716.5"/><text fill="#000000" font-family="sans-serif" font-size="14" lengthAdjust="spacing" textLength="116" x="524.5" y="1733.4951">domain invitation</text><line style="stroke:#181818;stroke-width:1.0;" x1="519.5" x2="788.5" y1="1740.7969" y2="1740.7969"/><ellipse cx="529.5" cy="1754.4453" fill="#84BE84" rx="3" ry="3" style="stroke:#038048;stroke-width:1.0;"/><text fill="#000000" font-family="sans-serif" font-size="14" lengthAdjust="spacing" textLength="112" x="538.5" y="1757.792">id (BigAutoField)</text><ellipse cx="529.5" cy="1770.7422" fill="#84BE84" rx="3" ry="3" style="stroke:#038048;stroke-width:1.0;"/><text fill="#000000" font-family="sans-serif" font-size="14" lengthAdjust="spacing" textLength="186" x="538.5" y="1774.0889">created_at 
(DateTimeField)</text><ellipse cx="529.5" cy="1787.0391" fill="#84BE84" rx="3" ry="3" style="stroke:#038048;stroke-width:1.0;"/><text fill="#000000" font-family="sans-serif" font-size="14" lengthAdjust="spacing" textLength="191" x="538.5" y="1790.3857">updated_at (DateTimeField)</text><ellipse cx="529.5" cy="1803.3359" fill="#84BE84" rx="3" ry="3" style="stroke:#038048;stroke-width:1.0;"/><text fill="#000000" font-family="sans-serif" font-size="14" lengthAdjust="spacing" textLength="118" x="538.5" y="1806.6826">email (EmailField)</text><polygon fill="#4177AF" points="529.5,1815.6328,525.5,1821.6328,533.5,1821.6328" style="stroke:#1963A0;stroke-width:1.0;"/><text fill="#000000" font-family="sans-serif" font-size="14" lengthAdjust="spacing" textLength="142" x="538.5" y="1822.9795">domain (ForeignKey)</text><ellipse cx="529.5" cy="1835.9297" fill="#84BE84" rx="3" ry="3" style="stroke:#038048;stroke-width:1.0;"/><text fill="#000000" font-family="sans-serif" font-size="14" lengthAdjust="spacing" textLength="118" x="538.5" y="1839.2764">status (FSMField)</text><line style="stroke:#181818;stroke-width:1.0;" x1="519.5" x2="788.5" y1="1846.5781" y2="1846.5781"/></g><!--class Nameserver--><g id="elem_Nameserver"><rect codeLine="198" fill="#D6F4E9" height="170.0781" id="Nameserver" rx="2.5" ry="2.5" style="stroke:#181818;stroke-width:0.5;" width="240" x="22" y="1348.5"/><ellipse cx="37" cy="1364.5" fill="#ADD1B2" rx="11" ry="11" style="stroke:#181818;stroke-width:1.0;"/><path d="M39.9688,1370.1406 Q39.3906,1370.4375 38.75,1370.5781 Q38.1094,1370.7344 37.4063,1370.7344 Q34.9063,1370.7344 33.5781,1369.0938 Q32.2656,1367.4375 32.2656,1364.3125 Q32.2656,1361.1875 33.5781,1359.5313 Q34.9063,1357.875 37.4063,1357.875 Q38.1094,1357.875 38.75,1358.0313 Q39.4063,1358.1875 39.9688,1358.4844 L39.9688,1361.2031 Q39.3438,1360.625 38.75,1360.3594 Q38.1563,1360.0781 37.5313,1360.0781 Q36.1875,1360.0781 35.5,1361.1563 Q34.8125,1362.2188 34.8125,1364.3125 Q34.8125,1366.4063 35.5,1367.4844 Q36.1875,1368.5469 37.5313,1368.5469 Q38.1563,1368.5469 38.75,1368.2813 Q39.3438,1368 39.9688,1367.4219 L39.9688,1370.1406 Z " fill="#000000"/><text fill="#000000" font-family="sans-serif" font-size="14" lengthAdjust="spacing" textLength="144" x="51" y="1369.3467">registrar.Nameserver</text><rect fill="#FFFFFF" height="15.9688" style="stroke:#181818;stroke-width:1.0;stroke-dasharray:2.0,2.0;" width="58" x="207" y="1345.5"/><text fill="#000000" font-family="sans-serif" font-size="12" font-style="italic" lengthAdjust="spacing" textLength="56" x="208" y="1357.6387">Registrar</text><line style="stroke:#181818;stroke-width:0.5;" x1="23" x2="261" y1="1380.5" y2="1380.5"/><text fill="#000000" font-family="sans-serif" font-size="14" lengthAdjust="spacing" textLength="82" x="28" y="1397.4951">nameserver</text><line style="stroke:#181818;stroke-width:1.0;" x1="23" x2="261" y1="1404.7969" y2="1404.7969"/><ellipse cx="33" cy="1418.4453" fill="#84BE84" rx="3" ry="3" style="stroke:#038048;stroke-width:1.0;"/><text fill="#000000" font-family="sans-serif" font-size="14" lengthAdjust="spacing" textLength="112" x="42" y="1421.792">id (BigAutoField)</text><ellipse cx="33" cy="1434.7422" fill="#84BE84" rx="3" ry="3" style="stroke:#038048;stroke-width:1.0;"/><text fill="#000000" font-family="sans-serif" font-size="14" lengthAdjust="spacing" textLength="186" x="42" y="1438.0889">created_at (DateTimeField)</text><ellipse cx="33" cy="1451.0391" fill="#84BE84" rx="3" ry="3" style="stroke:#038048;stroke-width:1.0;"/><text fill="#000000" 
font-family="sans-serif" font-size="14" lengthAdjust="spacing" textLength="191" x="42" y="1454.3857">updated_at (DateTimeField)</text><ellipse cx="33" cy="1467.3359" fill="#84BE84" rx="3" ry="3" style="stroke:#038048;stroke-width:1.0;"/><text fill="#000000" font-family="sans-serif" font-size="14" lengthAdjust="spacing" textLength="117" x="42" y="1470.6826">name (CharField)</text><polygon fill="#4177AF" points="33,1479.6328,29,1485.6328,37,1485.6328" style="stroke:#1963A0;stroke-width:1.0;"/><text fill="#000000" font-family="sans-serif" font-size="14" lengthAdjust="spacing" textLength="142" x="42" y="1486.9795">domain (ForeignKey)</text><polygon fill="#4177AF" points="33,1495.9297,29,1501.9297,37,1501.9297" style="stroke:#1963A0;stroke-width:1.0;"/><text fill="#000000" font-family="sans-serif" font-size="14" lengthAdjust="spacing" textLength="179" x="42" y="1503.2764">host_ptr (OneToOneField)</text><line style="stroke:#181818;stroke-width:1.0;" x1="23" x2="261" y1="1510.5781" y2="1510.5781"/></g><!--class PublicContact--><g id="elem_PublicContact"><rect codeLine="213" fill="#D6F4E9" height="349.3438" id="PublicContact" rx="2.5" ry="2.5" style="stroke:#181818;stroke-width:0.5;" width="252" x="286" y="147"/><ellipse cx="301" cy="163" fill="#ADD1B2" rx="11" ry="11" style="stroke:#181818;stroke-width:1.0;"/><path d="M303.9688,168.6406 Q303.3906,168.9375 302.75,169.0781 Q302.1094,169.2344 301.4063,169.2344 Q298.9063,169.2344 297.5781,167.5938 Q296.2656,165.9375 296.2656,162.8125 Q296.2656,159.6875 297.5781,158.0313 Q298.9063,156.375 301.4063,156.375 Q302.1094,156.375 302.75,156.5313 Q303.4063,156.6875 303.9688,156.9844 L303.9688,159.7031 Q303.3438,159.125 302.75,158.8594 Q302.1563,158.5781 301.5313,158.5781 Q300.1875,158.5781 299.5,159.6563 Q298.8125,160.7188 298.8125,162.8125 Q298.8125,164.9063 299.5,165.9844 Q300.1875,167.0469 301.5313,167.0469 Q302.1563,167.0469 302.75,166.7813 Q303.3438,166.5 303.9688,165.9219 L303.9688,168.6406 Z " fill="#000000"/><text fill="#000000" font-family="sans-serif" font-size="14" lengthAdjust="spacing" textLength="156" x="315" y="167.8467">registrar.PublicContact</text><rect fill="#FFFFFF" height="15.9688" style="stroke:#181818;stroke-width:1.0;stroke-dasharray:2.0,2.0;" width="58" x="483" y="144"/><text fill="#000000" font-family="sans-serif" font-size="12" font-style="italic" lengthAdjust="spacing" textLength="56" x="484" y="156.1387">Registrar</text><line style="stroke:#181818;stroke-width:0.5;" x1="287" x2="537" y1="179" y2="179"/><text fill="#000000" font-family="sans-serif" font-size="14" lengthAdjust="spacing" textLength="97" x="292" y="195.9951">public contact</text><line style="stroke:#181818;stroke-width:1.0;" x1="287" x2="537" y1="203.2969" y2="203.2969"/><ellipse cx="297" cy="216.9453" fill="#84BE84" rx="3" ry="3" style="stroke:#038048;stroke-width:1.0;"/><text fill="#000000" font-family="sans-serif" font-size="14" lengthAdjust="spacing" textLength="112" x="306" y="220.292">id (BigAutoField)</text><ellipse cx="297" cy="233.2422" fill="#84BE84" rx="3" ry="3" style="stroke:#038048;stroke-width:1.0;"/><text fill="#000000" font-family="sans-serif" font-size="14" lengthAdjust="spacing" textLength="186" x="306" y="236.5889">created_at (DateTimeField)</text><ellipse cx="297" cy="249.5391" fill="#84BE84" rx="3" ry="3" style="stroke:#038048;stroke-width:1.0;"/><text fill="#000000" font-family="sans-serif" font-size="14" lengthAdjust="spacing" textLength="191" x="306" y="252.8857">updated_at (DateTimeField)</text><ellipse cx="297" cy="265.8359" fill="#84BE84" 
rx="3" ry="3" style="stroke:#038048;stroke-width:1.0;"/><text fill="#000000" font-family="sans-serif" font-size="14" lengthAdjust="spacing" textLength="167" x="306" y="269.1826">contact_type (CharField)</text><ellipse cx="297" cy="282.1328" fill="#84BE84" rx="3" ry="3" style="stroke:#038048;stroke-width:1.0;"/><text fill="#000000" font-family="sans-serif" font-size="14" lengthAdjust="spacing" textLength="115" x="306" y="285.4795">name (TextField)</text><ellipse cx="297" cy="298.4297" fill="#84BE84" rx="3" ry="3" style="stroke:#038048;stroke-width:1.0;"/><text fill="#000000" font-family="sans-serif" font-size="14" lengthAdjust="spacing" textLength="99" x="306" y="301.7764">org (TextField)</text><ellipse cx="297" cy="314.7266" fill="#84BE84" rx="3" ry="3" style="stroke:#038048;stroke-width:1.0;"/><text fill="#000000" font-family="sans-serif" font-size="14" lengthAdjust="spacing" textLength="126" x="306" y="318.0732">street1 (TextField)</text><ellipse cx="297" cy="331.0234" fill="#84BE84" rx="3" ry="3" style="stroke:#038048;stroke-width:1.0;"/><text fill="#000000" font-family="sans-serif" font-size="14" lengthAdjust="spacing" textLength="126" x="306" y="334.3701">street2 (TextField)</text><ellipse cx="297" cy="347.3203" fill="#84BE84" rx="3" ry="3" style="stroke:#038048;stroke-width:1.0;"/><text fill="#000000" font-family="sans-serif" font-size="14" lengthAdjust="spacing" textLength="126" x="306" y="350.667">street3 (TextField)</text><ellipse cx="297" cy="363.6172" fill="#84BE84" rx="3" ry="3" style="stroke:#038048;stroke-width:1.0;"/><text fill="#000000" font-family="sans-serif" font-size="14" lengthAdjust="spacing" textLength="99" x="306" y="366.9639">city (TextField)</text><ellipse cx="297" cy="379.9141" fill="#84BE84" rx="3" ry="3" style="stroke:#038048;stroke-width:1.0;"/><text fill="#000000" font-family="sans-serif" font-size="14" lengthAdjust="spacing" textLength="93" x="306" y="383.2607">sp (TextField)</text><ellipse cx="297" cy="396.2109" fill="#84BE84" rx="3" ry="3" style="stroke:#038048;stroke-width:1.0;"/><text fill="#000000" font-family="sans-serif" font-size="14" lengthAdjust="spacing" textLength="93" x="306" y="399.5576">pc (TextField)</text><ellipse cx="297" cy="412.5078" fill="#84BE84" rx="3" ry="3" style="stroke:#038048;stroke-width:1.0;"/><text fill="#000000" font-family="sans-serif" font-size="14" lengthAdjust="spacing" textLength="92" x="306" y="415.8545">cc (TextField)</text><ellipse cx="297" cy="428.8047" fill="#84BE84" rx="3" ry="3" style="stroke:#038048;stroke-width:1.0;"/><text fill="#000000" font-family="sans-serif" font-size="14" lengthAdjust="spacing" textLength="112" x="306" y="432.1514">email (TextField)</text><ellipse cx="297" cy="445.1016" fill="#84BE84" rx="3" ry="3" style="stroke:#038048;stroke-width:1.0;"/><text fill="#000000" font-family="sans-serif" font-size="14" lengthAdjust="spacing" textLength="112" x="306" y="448.4482">voice (TextField)</text><ellipse cx="297" cy="461.3984" fill="#84BE84" rx="3" ry="3" style="stroke:#038048;stroke-width:1.0;"/><text fill="#000000" font-family="sans-serif" font-size="14" lengthAdjust="spacing" textLength="95" x="306" y="464.7451">fax (TextField)</text><ellipse cx="297" cy="477.6953" fill="#84BE84" rx="3" ry="3" style="stroke:#038048;stroke-width:1.0;"/><text fill="#000000" font-family="sans-serif" font-size="14" lengthAdjust="spacing" textLength="96" x="306" y="481.042">pw (TextField)</text><line style="stroke:#181818;stroke-width:1.0;" x1="287" x2="537" y1="488.3438" y2="488.3438"/></g><!--link Contact to User--><g 
id="link_Contact_User"><path codeLine="38" d="M1004,1551.32 C1004,1570.6 1004,1590.86 1004,1610.95 " fill="none" id="Contact-User" style="stroke:#181818;stroke-width:1.0;"/></g><!--link DomainApplication to User--><g id="link_DomainApplication_User"><path codeLine="78" d="M1154.11,1256.08 C1143.68,1374.43 1130.71,1495.66 1113,1551 C1106.58,1571.05 1098.11,1591.27 1088.69,1610.89 " fill="none" id="DomainApplication-User" style="stroke:#181818;stroke-width:1.0;"/></g><!--link DomainApplication to User--><g id="link_DomainApplication_User"><path codeLine="79" d="M1186.12,1256.08 C1178.68,1374.43 1165.71,1495.66 1148,1551 C1141.58,1571.05 1133.11,1591.27 1123.46,1610.89 " fill="none" id="DomainApplication-User-1" style="stroke:#181818;stroke-width:1.0;"/></g><!--link DomainApplication to Contact--><g id="link_DomainApplication_Contact"><path codeLine="80" d="M1046.42,1256.22 C1038.95,1277.02 1032.02,1297.15 1025.91,1315.92 " fill="none" id="DomainApplication-Contact" style="stroke:#181818;stroke-width:1.0;"/></g><!--link DomainApplication to Contact--><g id="link_DomainApplication_Contact"><path codeLine="82" d="M1071.9,1256.22 C1063.89,1277.02 1056.13,1297.15 1048.9,1315.92 " fill="none" id="DomainApplication-Contact-1" style="stroke:#181818;stroke-width:1.0;"/></g><!--link DomainApplication to Contact--><g id="link_DomainApplication_Contact"><path codeLine="85" d="M1092.37,1268.33 C1087.31,1280.45 1082.26,1292.29 1077.28,1303.71 " fill="none" id="DomainApplication-Contact-2" style="stroke:#181818;stroke-width:1.0;"/><polygon fill="#181818" points="1097.38,1256.22,1091.3898,1260.2347,1092.7918,1267.3082,1098.782,1263.2935,1097.38,1256.22" style="stroke:#181818;stroke-width:1.0;"/><polygon fill="#181818" points="1071.9,1315.92,1077.9813,1312.0447,1076.743,1304.9407,1070.6617,1308.816,1071.9,1315.92" style="stroke:#181818;stroke-width:1.0;"/></g><!--link DomainApplication to Domain--><g id="link_DomainApplication_Domain"><path codeLine="81" d="M1195.48,1256.1 C1201.32,1520.28 1201.64,1873.62 1164,1928 C1112.34,2002.63 1012.39,2036.33 934.52,2051.53 " fill="none" id="DomainApplication-Domain" style="stroke:#181818;stroke-width:1.0;"/></g><!--link DomainApplication to Website--><g id="link_DomainApplication_Website"><path codeLine="83" d="M1268.61,1268.94 C1278.41,1299.33 1287.97,1327.94 1296.59,1352.48 " fill="none" id="DomainApplication-Website" style="stroke:#181818;stroke-width:1.0;"/><polygon fill="#181818" points="1264.53,1256.22,1262.5553,1263.1555,1268.1978,1267.6457,1270.1725,1260.7102,1264.53,1256.22" style="stroke:#181818;stroke-width:1.0;"/><polygon fill="#181818" points="1301.01,1364.92,1302.7667,1357.9261,1296.9865,1353.6146,1295.2298,1360.6085,1301.01,1364.92" style="stroke:#181818;stroke-width:1.0;"/></g><!--link DomainApplication to Website--><g id="link_DomainApplication_Website"><path codeLine="84" d="M1293.72,1268.94 C1302.5,1299.19 1310.42,1327.68 1316.78,1352.14 " fill="none" id="DomainApplication-Website-1" style="stroke:#181818;stroke-width:1.0;"/><polygon fill="#181818" points="1290.01,1256.22,1287.8527,1263.1009,1293.3746,1267.7387,1295.5318,1260.8578,1290.01,1256.22" style="stroke:#181818;stroke-width:1.0;"/><polygon fill="#181818" points="1320.06,1364.92,1322.4455,1358.1149,1317.0814,1353.2955,1314.6959,1360.1006,1320.06,1364.92" style="stroke:#181818;stroke-width:1.0;"/></g><!--link DomainInformation to User--><g id="link_DomainInformation_User"><path codeLine="123" d="M944.13,602.07 C894.66,908.1 831.58,1378.26 878,1551 C883.41,1571.12 891.18,1591.31 900.25,1610.83 " 
fill="none" id="DomainInformation-User" style="stroke:#181818;stroke-width:1.0;"/></g><!--link DomainInformation to DomainApplication--><g id="link_DomainInformation_DomainApplication"><path codeLine="124" d="M1077.93,602.16 C1083.95,621.98 1090.03,641.99 1096.07,661.89 " fill="none" id="DomainInformation-DomainApplication" style="stroke:#181818;stroke-width:1.0;"/></g><!--link DomainInformation to Contact--><g id="link_DomainInformation_Contact"><path codeLine="125" d="M972.63,602.24 C969.24,832.64 974.45,1147.79 985.97,1315.8 " fill="none" id="DomainInformation-Contact" style="stroke:#181818;stroke-width:1.0;"/></g><!--link DomainInformation to Contact--><g id="link_DomainInformation_Contact"><path codeLine="127" d="M995.77,602.24 C998.06,832.64 1001.18,1147.79 1002.84,1315.8 " fill="none" id="DomainInformation-Contact-1" style="stroke:#181818;stroke-width:1.0;"/></g><!--link DomainInformation to Contact--><g id="link_DomainInformation_Contact"><path codeLine="128" d="M1019.36,615.49 C1026.71,838.66 1027.7,1135.18 1020.32,1302.73 " fill="none" id="DomainInformation-Contact-2" style="stroke:#181818;stroke-width:1.0;"/><polygon fill="#181818" points="1018.91,602.24,1015.1166,608.3727,1019.3186,614.233,1023.112,608.1003,1018.91,602.24" style="stroke:#181818;stroke-width:1.0;"/><polygon fill="#181818" points="1019.72,1315.8,1023.9927,1309.991,1020.2739,1303.8128,1016.0012,1309.6218,1019.72,1315.8" style="stroke:#181818;stroke-width:1.0;"/></g><!--link DomainInformation to Domain--><g id="link_DomainInformation_Domain"><path codeLine="126" d="M1150.18,427.83 C1226.3,487.38 1311.7,568.6 1361,662 C1546.52,1013.45 1577.85,1172.41 1457,1551 C1394.48,1746.84 1351.95,1802.67 1189,1928 C1113.92,1985.75 1011.67,2021.11 934.82,2041.24 " fill="none" id="DomainInformation-Domain" style="stroke:#181818;stroke-width:1.0;"/></g><!--link HostIP to Host--><g id="link_HostIP_Host"><path codeLine="153" d="M398.93,1510.71 C393.88,1565.1 387.12,1637.95 382.07,1692.33 " fill="none" id="HostIP-Host" style="stroke:#181818;stroke-width:1.0;"/></g><!--link Host to Domain--><g id="link_Host_Domain"><path codeLine="166" d="M423.86,1846.74 C444.92,1875.1 471.51,1905.75 501,1928 C565.7,1976.8 650.49,2011.52 717.24,2033.66 " fill="none" id="Host-Domain" style="stroke:#181818;stroke-width:1.0;"/></g><!--link UserDomainRole to User--><g id="link_UserDomainRole_User"><path codeLine="180" d="M766.24,1518.78 C795.32,1549.32 829.09,1584.79 861.89,1619.24 " fill="none" id="UserDomainRole-User" style="stroke:#181818;stroke-width:1.0;"/></g><!--link UserDomainRole to Domain--><g id="link_UserDomainRole_Domain"><path codeLine="181" d="M757.74,1518.62 C777.33,1546.31 796.14,1578.39 807,1611 C849.28,1737.96 843.78,1896.07 835.32,1987.96 " fill="none" id="UserDomainRole-Domain" style="stroke:#181818;stroke-width:1.0;"/></g><!--link DomainInvitation to Domain--><g id="link_DomainInvitation_Domain"><path codeLine="195" d="M703.29,1854.61 C727.91,1896.61 757.47,1947.06 781.41,1987.9 " fill="none" id="DomainInvitation-Domain" style="stroke:#181818;stroke-width:1.0;"/></g><!--link Nameserver to Domain--><g id="link_Nameserver_Domain"><path codeLine="209" d="M135.07,1518.58 C130.97,1628.12 141.5,1818.55 249,1928 C313.41,1993.58 568.85,2033.85 717.38,2052.21 " fill="none" id="Nameserver-Domain" style="stroke:#181818;stroke-width:1.0;"/></g><!--link Nameserver to Host--><g id="link_Nameserver_Host"><path codeLine="210" d="M200.8,1518.78 C238,1572.12 285.72,1640.52 321.8,1692.24 " fill="none" id="Nameserver-Host" 
style="stroke:#181818;stroke-width:1.0;"/></g><!--link User to Domain--><g id="link_User_Domain"><path codeLine="257" d="M901.22,1939.98 C893.66,1952.44 886.23,1964.68 879.17,1976.33 " fill="none" id="User-Domain" style="stroke:#181818;stroke-width:1.0;"/><polygon fill="#181818" points="908.23,1928.42,901.6997,1931.4787,902.0115,1938.6831,908.5418,1935.6244,908.23,1928.42" style="stroke:#181818;stroke-width:1.0;"/><polygon fill="#181818" points="872.24,1987.76,878.7691,1984.6989,878.4546,1977.4946,871.9255,1980.5558,872.24,1987.76" style="stroke:#181818;stroke-width:1.0;"/></g><!--SRC=[xLXjZzis3FwUNo7qlcGzP42xOS20OK1VTcWnj3jqD-oZeCYCezMHN4dEDLVqVdlfbrmjctHIu9eDG_CX2KJA8XzI3sdgGeKW7dZ8JOXU-U-VEHkLZkAND_kbNny85SHOuopxQVKZ_2m-p0H-TBlQ_5yicfzpOJ8nVsho9rLq5mQAxBjkNNjG4JAfefW_ntzNPWE9GbLcKmgteWhWnVmF2rSElvAzA-D3b5PjGColu7rCX1kJPGLmKWI3tHbDBAXrs2XJ4ElbsbdKlwn_NbUR9VYU617wE1j3kbZqSFuBNPpDPdeOeUSEJxHFoh8mMaNZx4IinhffrB96BjHdXTC4y5o4g68LnFpYprT9O9idEupjXVDWSlixxFOYOxSGeibf-RbmFbVMt3HkoBWh4VrdQ-NJv80Cl2gAdVIWNOxgQ6JqPedAJvqhGDcHcN2aRYFYqgSxMAeShDvDA326co2X0DtujdJAPxGD2GQCAIhBF8GW2sFXqG7vOy8KRIBbG0jL1EyD1cX7RBmnfNOPvLtbbtTcKtSFCP5k9Q-TVqjTQam5cNXkxChv3VsuMw6gk7RUt1YRurDNHXjLK0dduLs56OTHx-u9HJYt8bJBZOaHoAGiAr-wG8N1EkdY6hpig3D8tnYiI9fHTXVNjS5Ga4_3_2WTNkATLFgjTTS5P3cGIN8cTEKzs2YlOHbC1Bo5h_3nLwx-JXHLWLvPj6eB7GIyRkeBepRWlJ4zZHdmAsbtb7lyXbPylmSyN2mUzhRytSRZAs_P6yQMbfUzotEejFHroTBIkucdwWqcIaXhbEpLEwQ9-5PBljMIUwqb_wSAWl41h24OEjbrlx_LF_TK3lgKSfZWMUqH3nw_bQFk0sVTxm5Voje7gPhavrEr-orzK4o4Jc5xCNmHQ3A3yFM52_7bvOIldKBgwneNXIbFvMn7UYD_Rrj3HdTyS0qw5vACgTSuF0wWGM3nhqRzCnSUWGDvpmWSQcvefM-GhIOG6IYcs3HJXSuwZ-BJJZFueZ4k2hLr8muVu3TmTOfBfrHvqFUajJNns6xnJfLf5lVoKsKeKx-ceASd5qmfk_FtI4XVuut1M6mn7tWmUqefZFPEy1-zuIq3oZBImxi1Y3q_fyijfyYnvcMrnAxWyAWprKknBXlPQUUUv_lZkCwUwTAndoMRR0yG7x6Inwpa1qB2TV8bDHlLr0Fq5qnNjyveyjL8lQVEkwQvaYaa1ye7KJIE2t-9P-5hHqO4hfay5owlBmqPUEoKGbM2Rqi5rPFN4gRDwe_4wQCP6KEFhJbHhLQqARssZXBNEIx_SVXAc37UJWxFpqJkNLNoewWQ2ObORKm8oBsyulHCYoYdmwwS9_9kC3EHIdkDD9kwcTof-AGxRXJwzdfy0W00]--></g></svg>
\ No newline at end of file
diff --git a/src/Pipfile b/src/Pipfile
index d7551f63b..a3a20051c 100644
--- a/src/Pipfile
+++ b/src/Pipfile
@@ -37,3 +37,4 @@ django-webtest = "*"
types-cachetools = "*"
boto3-mocking = "*"
boto3-stubs = "*"
+django-model2puml = "*"
diff --git a/src/Pipfile.lock b/src/Pipfile.lock
index e77116ef5..04722b876 100644
--- a/src/Pipfile.lock
+++ b/src/Pipfile.lock
@@ -1,7 +1,7 @@
{
"_meta": {
"hash": {
- "sha256": "ebec8b958bcfde525ad74aa1e777b55855e86b2d63264612bc2855bf167070b1"
+ "sha256": "b6c1a957da6c715c734906059a81da21cb0eb4c4ab04f204eb58a48ddb8f7234"
},
"pipfile-spec": 6,
"requires": {},
@@ -24,19 +24,19 @@
},
"boto3": {
"hashes": [
- "sha256:38ca632be379963f2a2749b5f63a81fe1679913b954914f470ad282c77674bbc",
- "sha256:4d575c180312bec6108852bae12e6396b9d1bb404154d652c57ee849c62fbb83"
+ "sha256:62285ecee7629a4388d55ae369536f759622d68d5b9a0ced7c58a0c1a409c0f7",
+ "sha256:8ff0af0b25266a01616396abc19eb34dc3d44bd867fa4158985924128b9034fb"
],
"index": "pypi",
- "version": "==1.26.122"
+ "version": "==1.26.133"
},
"botocore": {
"hashes": [
- "sha256:9e4984a9e9777c6b949aa1e98323fa35480d9f99d447af7e179ae611f7ed5af9",
- "sha256:c3b41078d235761b9c5dc22f534a76952622ef96787b96bbd10242ec4d73f2a5"
+ "sha256:7b38e540f73c921d8cb0ac72794072000af9e10758c04ba7f53d5629cc52fa87",
+ "sha256:b266185d7414a559952569005009a400de50af91fd3da44f05cf05b00946c4a7"
],
"markers": "python_version >= '3.7'",
- "version": "==1.29.122"
+ "version": "==1.29.133"
},
"cachetools": {
"hashes": [
@@ -48,11 +48,11 @@
},
"certifi": {
"hashes": [
- "sha256:35824b4c3a97115964b408844d64aa14db1cc518f6562e8d7261699d1350a9e3",
- "sha256:4ad3232f5e926d6718ec31cfc1fcadfde020920e278684144551c91769c7bc18"
+ "sha256:0f0d56dc5a6ad56fd4ba36484d6cc34451e1c6548c61daad8c320169f91eddc7",
+ "sha256:c6c2e98f5c7869efca1f8916fed228dd91539f9f1b444c314c06eef02980c716"
],
"markers": "python_version >= '3.6'",
- "version": "==2022.12.7"
+ "version": "==2023.5.7"
},
"cfenv": {
"hashes": [
@@ -261,11 +261,11 @@
},
"django": {
"hashes": [
- "sha256:ad33ed68db9398f5dfb33282704925bce044bef4261cd4fb59e4e7f9ae505a78",
- "sha256:c36e2ab12824e2ac36afa8b2515a70c53c7742f0d6eaefa7311ec379558db997"
+ "sha256:066b6debb5ac335458d2a713ed995570536c8b59a580005acb0732378d5eb1ee",
+ "sha256:7efa6b1f781a6119a10ac94b4794ded90db8accbe7802281cd26f8664ffed59c"
],
"index": "pypi",
- "version": "==4.2"
+ "version": "==4.2.1"
},
"django-allow-cidr": {
"hashes": [
@@ -338,11 +338,11 @@
},
"faker": {
"hashes": [
- "sha256:49060d40e6659e116f53353c5771ad2f2cbcd12b15771f49e3000a3a451f13ec",
- "sha256:ac903ba8cb5adbce2cdd15e5536118d484bbe01126f3c774dd9f6df77b61232d"
+ "sha256:38dbc3b80e655d7301e190426ab30f04b6b7f6ca4764c5dd02772ffde0fa6dcd",
+ "sha256:f02c6d3fdb5bc781f80b440cf2bdec336ed47ecfb8d620b20c3d4188ed051831"
],
"index": "pypi",
- "version": "==18.6.0"
+ "version": "==18.7.0"
},
"furl": {
"hashes": [
@@ -623,19 +623,19 @@
},
"requests": {
"hashes": [
- "sha256:e8f3c9be120d3333921d213eef078af392fba3933ab7ed2d1cba3b56f2568c3b",
- "sha256:f2e34a75f4749019bb0e3effb66683630e4ffeaf75819fb51bebef1bf5aef059"
+ "sha256:10e94cc4f3121ee6da529d358cdaeaff2f1c409cd377dbc72b825852f2f7e294",
+ "sha256:239d7d4458afcb28a692cdd298d87542235f4ca8d36d03a15bfc128a6559a2f4"
],
"index": "pypi",
- "version": "==2.29.0"
+ "version": "==2.30.0"
},
"s3transfer": {
"hashes": [
- "sha256:06176b74f3a15f61f1b4f25a1fc29a4429040b7647133a463da8fa5bd28d5ecd",
- "sha256:2ed07d3866f523cc561bf4a00fc5535827981b117dd7876f036b0c1aca42c947"
+ "sha256:3c0da2d074bf35d6870ef157158641178a4204a6e689e82546083e31e0311346",
+ "sha256:640bb492711f4c0c0905e1f62b6aaeb771881935ad27884852411f8e9cacbca9"
],
"markers": "python_version >= '3.7'",
- "version": "==0.6.0"
+ "version": "==0.6.1"
},
"setuptools": {
"hashes": [
@@ -752,11 +752,11 @@
},
"boto3": {
"hashes": [
- "sha256:38ca632be379963f2a2749b5f63a81fe1679913b954914f470ad282c77674bbc",
- "sha256:4d575c180312bec6108852bae12e6396b9d1bb404154d652c57ee849c62fbb83"
+ "sha256:62285ecee7629a4388d55ae369536f759622d68d5b9a0ced7c58a0c1a409c0f7",
+ "sha256:8ff0af0b25266a01616396abc19eb34dc3d44bd867fa4158985924128b9034fb"
],
"index": "pypi",
- "version": "==1.26.122"
+ "version": "==1.26.133"
},
"boto3-mocking": {
"hashes": [
@@ -768,27 +768,27 @@
},
"boto3-stubs": {
"hashes": [
- "sha256:401e7fe51d88a51b527d883d195ed20c7f57aeb2c0aea24bbb3e911b6d2ad3aa",
- "sha256:743a37bfd7d1eed4d67cdf825283abc1d93b7900b81d7426aab7e691e075c897"
+ "sha256:a921814574761842073822dc5e9fc7ca4f1c5fdeaa53d83cd8831e060dae09c8",
+ "sha256:cc6a662700e755c1e3dec2383c146b89cd8c70b5921033504bfb8367d03a538f"
],
"index": "pypi",
- "version": "==1.26.122"
+ "version": "==1.26.133"
},
"botocore": {
"hashes": [
- "sha256:9e4984a9e9777c6b949aa1e98323fa35480d9f99d447af7e179ae611f7ed5af9",
- "sha256:c3b41078d235761b9c5dc22f534a76952622ef96787b96bbd10242ec4d73f2a5"
+ "sha256:7b38e540f73c921d8cb0ac72794072000af9e10758c04ba7f53d5629cc52fa87",
+ "sha256:b266185d7414a559952569005009a400de50af91fd3da44f05cf05b00946c4a7"
],
"markers": "python_version >= '3.7'",
- "version": "==1.29.122"
+ "version": "==1.29.133"
},
"botocore-stubs": {
"hashes": [
- "sha256:59873a3b535ec3ff0b6bf5f41c9f8a0f8c48032a871bea4d6e4faebbbfc68e8b",
- "sha256:e6e6c527a6cac0ec69dd1b755d530c9b2dab01d423ce47bdc636dd01ebb01b1b"
+ "sha256:5f6f1967d23c45834858a055cbf65b66863f9f28d05f32f57bf52864a13512d9",
+ "sha256:622c4a5cd740498439008d81c5ded612146f4f0d575341c12591f978edbbe733"
],
"markers": "python_version >= '3.7' and python_version < '4.0'",
- "version": "==1.29.122"
+ "version": "==1.29.130"
},
"click": {
"hashes": [
@@ -800,11 +800,11 @@
},
"django": {
"hashes": [
- "sha256:ad33ed68db9398f5dfb33282704925bce044bef4261cd4fb59e4e7f9ae505a78",
- "sha256:c36e2ab12824e2ac36afa8b2515a70c53c7742f0d6eaefa7311ec379558db997"
+ "sha256:066b6debb5ac335458d2a713ed995570536c8b59a580005acb0732378d5eb1ee",
+ "sha256:7efa6b1f781a6119a10ac94b4794ded90db8accbe7802281cd26f8664ffed59c"
],
"index": "pypi",
- "version": "==4.2"
+ "version": "==4.2.1"
},
"django-debug-toolbar": {
"hashes": [
@@ -814,6 +814,13 @@
"index": "pypi",
"version": "==4.0.0"
},
+ "django-model2puml": {
+ "hashes": [
+ "sha256:6e773d742e556020a04d3216ce5dee5d3551da162e2d42a997f85b4ed1854771"
+ ],
+ "index": "pypi",
+ "version": "==0.4.1"
+ },
"django-stubs": {
"hashes": [
"sha256:93baff824f0a056e71036b423b942a74f07b909e45e3fa38185b910f597c5c08",
@@ -896,35 +903,35 @@
},
"mypy": {
"hashes": [
- "sha256:023fe9e618182ca6317ae89833ba422c411469156b690fde6a315ad10695a521",
- "sha256:031fc69c9a7e12bcc5660b74122ed84b3f1c505e762cc4296884096c6d8ee140",
- "sha256:2de7babe398cb7a85ac7f1fd5c42f396c215ab3eff731b4d761d68d0f6a80f48",
- "sha256:2e93a8a553e0394b26c4ca683923b85a69f7ccdc0139e6acd1354cc884fe0128",
- "sha256:390bc685ec209ada4e9d35068ac6988c60160b2b703072d2850457b62499e336",
- "sha256:3a2d219775a120581a0ae8ca392b31f238d452729adbcb6892fa89688cb8306a",
- "sha256:3efde4af6f2d3ccf58ae825495dbb8d74abd6d176ee686ce2ab19bd025273f41",
- "sha256:4a99fe1768925e4a139aace8f3fb66db3576ee1c30b9c0f70f744ead7e329c9f",
- "sha256:4b41412df69ec06ab141808d12e0bf2823717b1c363bd77b4c0820feaa37249e",
- "sha256:4c8d8c6b80aa4a1689f2a179d31d86ae1367ea4a12855cc13aa3ba24bb36b2d8",
- "sha256:4d19f1a239d59f10fdc31263d48b7937c585810288376671eaf75380b074f238",
- "sha256:4e4a682b3f2489d218751981639cffc4e281d548f9d517addfd5a2917ac78119",
- "sha256:695c45cea7e8abb6f088a34a6034b1d273122e5530aeebb9c09626cea6dca4cb",
- "sha256:701189408b460a2ff42b984e6bd45c3f41f0ac9f5f58b8873bbedc511900086d",
- "sha256:70894c5345bea98321a2fe84df35f43ee7bb0feec117a71420c60459fc3e1eed",
- "sha256:8293a216e902ac12779eb7a08f2bc39ec6c878d7c6025aa59464e0c4c16f7eb9",
- "sha256:8d26b513225ffd3eacece727f4387bdce6469192ef029ca9dd469940158bc89e",
- "sha256:a197ad3a774f8e74f21e428f0de7f60ad26a8d23437b69638aac2764d1e06a6a",
- "sha256:bea55fc25b96c53affab852ad94bf111a3083bc1d8b0c76a61dd101d8a388cf5",
- "sha256:c9a084bce1061e55cdc0493a2ad890375af359c766b8ac311ac8120d3a472950",
- "sha256:d0e9464a0af6715852267bf29c9553e4555b61f5904a4fc538547a4d67617937",
- "sha256:d8e9187bfcd5ffedbe87403195e1fc340189a68463903c39e2b63307c9fa0394",
- "sha256:eaeaa0888b7f3ccb7bcd40b50497ca30923dba14f385bde4af78fac713d6d6f6",
- "sha256:f46af8d162f3d470d8ffc997aaf7a269996d205f9d746124a179d3abe05ac602",
- "sha256:f70a40410d774ae23fcb4afbbeca652905a04de7948eaf0b1789c8d1426b72d1",
- "sha256:fe91be1c51c90e2afe6827601ca14353bbf3953f343c2129fa1e247d55fd95ba"
+ "sha256:1c4c42c60a8103ead4c1c060ac3cdd3ff01e18fddce6f1016e08939647a0e703",
+ "sha256:44797d031a41516fcf5cbfa652265bb994e53e51994c1bd649ffcd0c3a7eccbf",
+ "sha256:473117e310febe632ddf10e745a355714e771ffe534f06db40702775056614c4",
+ "sha256:4c99c3ecf223cf2952638da9cd82793d8f3c0c5fa8b6ae2b2d9ed1e1ff51ba85",
+ "sha256:550a8b3a19bb6589679a7c3c31f64312e7ff482a816c96e0cecec9ad3a7564dd",
+ "sha256:658fe7b674769a0770d4b26cb4d6f005e88a442fe82446f020be8e5f5efb2fae",
+ "sha256:6e33bb8b2613614a33dff70565f4c803f889ebd2f859466e42b46e1df76018dd",
+ "sha256:6e42d29e324cdda61daaec2336c42512e59c7c375340bd202efa1fe0f7b8f8ca",
+ "sha256:74bc9b6e0e79808bf8678d7678b2ae3736ea72d56eede3820bd3849823e7f305",
+ "sha256:76ec771e2342f1b558c36d49900dfe81d140361dd0d2df6cd71b3db1be155409",
+ "sha256:7d23370d2a6b7a71dc65d1266f9a34e4cde9e8e21511322415db4b26f46f6b8c",
+ "sha256:87df44954c31d86df96c8bd6e80dfcd773473e877ac6176a8e29898bfb3501cb",
+ "sha256:8c5979d0deb27e0f4479bee18ea0f83732a893e81b78e62e2dda3e7e518c92ee",
+ "sha256:95d8d31a7713510685b05fbb18d6ac287a56c8f6554d88c19e73f724a445448a",
+ "sha256:a22435632710a4fcf8acf86cbd0d69f68ac389a3892cb23fbad176d1cddaf228",
+ "sha256:a8763e72d5d9574d45ce5881962bc8e9046bf7b375b0abf031f3e6811732a897",
+ "sha256:c1eb485cea53f4f5284e5baf92902cd0088b24984f4209e25981cc359d64448d",
+ "sha256:c5d2cc54175bab47011b09688b418db71403aefad07cbcd62d44010543fc143f",
+ "sha256:cbc07246253b9e3d7d74c9ff948cd0fd7a71afcc2b77c7f0a59c26e9395cb152",
+ "sha256:d0b6c62206e04061e27009481cb0ec966f7d6172b5b936f3ead3d74f29fe3dcf",
+ "sha256:ddae0f39ca146972ff6bb4399f3b2943884a774b8771ea0a8f50e971f5ea5ba8",
+ "sha256:e1f4d16e296f5135624b34e8fb741eb0eadedca90862405b1f1fde2040b9bd11",
+ "sha256:e86c2c6852f62f8f2b24cb7a613ebe8e0c7dc1402c61d36a609174f63e0ff017",
+ "sha256:ebc95f8386314272bbc817026f8ce8f4f0d2ef7ae44f947c4664efac9adec929",
+ "sha256:f9dca1e257d4cc129517779226753dbefb4f2266c4eaad610fc15c6a7e14283e",
+ "sha256:faff86aa10c1aa4a10e1a301de160f3d8fc8703b88c7e98de46b531ff1276a9a"
],
"index": "pypi",
- "version": "==1.2.0"
+ "version": "==1.3.0"
},
"mypy-extensions": {
"hashes": [
@@ -968,11 +975,11 @@
},
"platformdirs": {
"hashes": [
- "sha256:47692bc24c1958e8b0f13dd727307cff1db103fca36399f457da8e05f222fdc4",
- "sha256:7954a68d0ba23558d753f73437c55f89027cf8f5108c19844d4b82e5af396335"
+ "sha256:412dae91f52a6f84830f39a8078cecd0e866cb72294a5c66808e74d5e88d251f",
+ "sha256:e2378146f1964972c03c085bb5662ae80b2b8c06226c54b2ff4aa9483e8a13a5"
],
"markers": "python_version >= '3.7'",
- "version": "==3.5.0"
+ "version": "==3.5.1"
},
"pycodestyle": {
"hashes": [
@@ -1062,11 +1069,11 @@
},
"s3transfer": {
"hashes": [
- "sha256:06176b74f3a15f61f1b4f25a1fc29a4429040b7647133a463da8fa5bd28d5ecd",
- "sha256:2ed07d3866f523cc561bf4a00fc5535827981b117dd7876f036b0c1aca42c947"
+ "sha256:3c0da2d074bf35d6870ef157158641178a4204a6e689e82546083e31e0311346",
+ "sha256:640bb492711f4c0c0905e1f62b6aaeb771881935ad27884852411f8e9cacbca9"
],
"markers": "python_version >= '3.7'",
- "version": "==0.6.0"
+ "version": "==0.6.1"
},
"six": {
"hashes": [
@@ -1118,11 +1125,11 @@
},
"types-awscrt": {
"hashes": [
- "sha256:40854d9d7ce055620d5d41e5adc84df11b879aedbd2cf20de84e73f084aa5797",
- "sha256:fe38c6fd71199a9f739b69a7c2f3a574585457c4f63730a62830628a7bffc5b0"
+ "sha256:9e447df3ad46767887d14fa9c856df94f80e8a0a7f0169577ab23b52ee37bcdf",
+ "sha256:e28fb3f20568ce9e96e33e01e0b87b891822f36b8f368adb582553b016d4aa08"
],
"markers": "python_version >= '3.7' and python_version < '4.0'",
- "version": "==0.16.16"
+ "version": "==0.16.17"
},
"types-cachetools": {
"hashes": [
@@ -1148,26 +1155,26 @@
},
"types-requests": {
"hashes": [
- "sha256:0d580652ce903f643f8c3b494dd01d29367ea57cea0c7ad7f65cf3169092edb0",
- "sha256:cc1aba862575019306b2ed134eb1ea994cab1c887a22e18d3383e6dd42e9789b"
+ "sha256:c6cf08e120ca9f0dc4fa4e32c3f953c3fba222bcc1db6b97695bce8da1ba9864",
+ "sha256:dec781054324a70ba64430ae9e62e7e9c8e4618c185a5cb3f87a6738251b5a31"
],
"index": "pypi",
- "version": "==2.28.11.17"
+ "version": "==2.30.0.0"
},
"types-s3transfer": {
"hashes": [
- "sha256:40e665643f0647832d51c4a26d8a8275cda9134b02bf22caf28198b79bcad382",
- "sha256:d9c669b30fdd61347720434aacb8ecc4645d900712a70b10f495104f9039c07b"
+ "sha256:6d1ac1dedac750d570428362acdf60fdd4f277b0788855c3894d3226756b2bfb",
+ "sha256:75ac1d7143d58c1e6af467cfd4a96c67ee058a3adf7c249d9309999e1f5f41e4"
],
"markers": "python_version >= '3.7' and python_version < '4.0'",
- "version": "==0.6.0.post7"
+ "version": "==0.6.1"
},
"types-urllib3": {
"hashes": [
- "sha256:04235e792139cf3624b25d38faab593456738fbdb7439634046172e3b1339400",
- "sha256:697102ddf4f781eed6f692353f40cee1098643526f5a8b99f49d2ede90fd3754"
+ "sha256:3300538c9dc11dad32eae4827ac313f5d986b8b21494801f1bf97a1ac6c03ae5",
+ "sha256:5dbd1d2bef14efee43f5318b5d36d805a489f6600252bb53626d4bfafd95e27c"
],
- "version": "==1.26.25.11"
+ "version": "==1.26.25.13"
},
"typing-extensions": {
"hashes": [
diff --git a/src/registrar/config/settings.py b/src/registrar/config/settings.py
index 9491b354a..ce6307e3d 100644
--- a/src/registrar/config/settings.py
+++ b/src/registrar/config/settings.py
@@ -109,6 +109,8 @@
"registrar",
# Our internal API application
"api",
+ # Only for generating documentation, uncomment to run manage.py generate_puml
+ # "puml_generator",
]
# Middleware are routines for processing web requests.
|
readthedocs__readthedocs.org-11075 | Build: support Ruby under `build.tools`
We should add support for Ruby in `build.tools`. It will be useful for doctools like Jekyll.
Work required:
- [x] Update the documentation
- [x] Install asdf-ruby (https://github.com/asdf-vm/asdf-ruby) on Docker images
- [x] Compile the latest Ruby version and upload it to S3 (happening at https://app.circleci.com/pipelines/github/readthedocs/readthedocs-docker-images/289/workflows/f1bc7c62-02d8-4353-ac94-972eb58b0675/jobs/503)
- [x] Update `settings.py` to add this tool and version (see the sketch below)
- [x] Update config v2 to accept this value
- [x] Create a branch on `test-builds` for this use case
> **Note**: we had a support request for this at https://github.com/readthedocs/readthedocs.org/issues/9599#issuecomment-1560011462
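
As a sketch of the `settings.py` mapping step above, here is a minimal, hypothetical Python helper (not Read the Docs code; the helper name is invented, but the mapping shape mirrors `readthedocs/builds/constants_docker.py` shown below) resolving a `build.tools` entry to the full asdf version:

```python
# Hypothetical helper showing how a config-file version such as ruby: "3.3"
# resolves against the RTD_DOCKER_BUILD_SETTINGS mapping shape.
RTD_DOCKER_BUILD_SETTINGS = {
    "tools": {
        # The entry this issue adds: "3.3" maps to the full asdf version.
        "ruby": {"3.3": "3.3.0"},
    },
}


def resolve_tool_version(tool: str, version: str) -> str:
    """Map a build.tools entry to the asdf version that gets installed."""
    try:
        return RTD_DOCKER_BUILD_SETTINGS["tools"][tool][version]
    except KeyError:
        raise ValueError(f"unsupported build.tools entry: {tool}: {version!r}")


assert resolve_tool_version("ruby", "3.3") == "3.3.0"
```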
| [
{
"content": "\"\"\"\nDefine constants here to allow import them without any external dependency.\n\nThere are situations where we want to have access to these values without Django installed\n(e.g. common/dockerfiles/tasks.py)\n\nNote these constants where previously defined as Django settings in ``readthedocs/settings/base.py``.\n\"\"\"\n\nDOCKER_DEFAULT_IMAGE = \"readthedocs/build\"\n\n# Adding a new tool/version to this setting requires:\n#\n# - a mapping between the expected version in the config file, to the full\n# version installed via asdf (found via ``asdf list all <tool>``)\n#\n# - running the script ``./scripts/compile_version_upload.sh`` in\n# development and production environments to compile and cache the new\n# tool/version\n#\n# Note that when updating this options, you should also update the file:\n# readthedocs/rtd_tests/fixtures/spec/v2/schema.json\nRTD_DOCKER_BUILD_SETTINGS = {\n # Mapping of build.os options to docker image.\n \"os\": {\n \"ubuntu-20.04\": f\"{DOCKER_DEFAULT_IMAGE}:ubuntu-20.04\",\n \"ubuntu-22.04\": f\"{DOCKER_DEFAULT_IMAGE}:ubuntu-22.04\",\n },\n # Mapping of build.tools options to specific versions.\n \"tools\": {\n \"python\": {\n \"2.7\": \"2.7.18\",\n \"3.6\": \"3.6.15\",\n \"3.7\": \"3.7.17\",\n \"3.8\": \"3.8.18\",\n \"3.9\": \"3.9.18\",\n \"3.10\": \"3.10.13\",\n \"3.11\": \"3.11.6\",\n \"3.12\": \"3.12.0\",\n # Always point to the latest stable release.\n \"3\": \"3.12.0\",\n \"miniconda3-4.7\": \"miniconda3-4.7.12\",\n \"mambaforge-4.10\": \"mambaforge-4.10.3-10\",\n \"mambaforge-22.9\": \"mambaforge-22.9.0-3\",\n },\n \"nodejs\": {\n \"14\": \"14.20.1\",\n \"16\": \"16.18.1\",\n \"18\": \"18.16.1\", # LTS\n \"19\": \"19.0.1\",\n \"20\": \"20.3.1\",\n },\n \"rust\": {\n \"1.55\": \"1.55.0\",\n \"1.61\": \"1.61.0\",\n \"1.64\": \"1.64.0\",\n \"1.70\": \"1.70.0\",\n },\n \"golang\": {\n \"1.17\": \"1.17.13\",\n \"1.18\": \"1.18.10\",\n \"1.19\": \"1.19.10\",\n \"1.20\": \"1.20.5\",\n },\n },\n}\n",
"path": "readthedocs/builds/constants_docker.py"
}
] | [
{
"content": "\"\"\"\nDefine constants here to allow import them without any external dependency.\n\nThere are situations where we want to have access to these values without Django installed\n(e.g. common/dockerfiles/tasks.py)\n\nNote these constants where previously defined as Django settings in ``readthedocs/settings/base.py``.\n\"\"\"\n\nDOCKER_DEFAULT_IMAGE = \"readthedocs/build\"\n\n# Adding a new tool/version to this setting requires:\n#\n# - a mapping between the expected version in the config file, to the full\n# version installed via asdf (found via ``asdf list all <tool>``)\n#\n# - running the script ``./scripts/compile_version_upload.sh`` in\n# development and production environments to compile and cache the new\n# tool/version\n#\n# Note that when updating this options, you should also update the file:\n# readthedocs/rtd_tests/fixtures/spec/v2/schema.json\nRTD_DOCKER_BUILD_SETTINGS = {\n # Mapping of build.os options to docker image.\n \"os\": {\n \"ubuntu-20.04\": f\"{DOCKER_DEFAULT_IMAGE}:ubuntu-20.04\",\n \"ubuntu-22.04\": f\"{DOCKER_DEFAULT_IMAGE}:ubuntu-22.04\",\n },\n # Mapping of build.tools options to specific versions.\n \"tools\": {\n \"python\": {\n \"2.7\": \"2.7.18\",\n \"3.6\": \"3.6.15\",\n \"3.7\": \"3.7.17\",\n \"3.8\": \"3.8.18\",\n \"3.9\": \"3.9.18\",\n \"3.10\": \"3.10.13\",\n \"3.11\": \"3.11.6\",\n \"3.12\": \"3.12.0\",\n # Always point to the latest stable release.\n \"3\": \"3.12.0\",\n \"miniconda3-4.7\": \"miniconda3-4.7.12\",\n \"mambaforge-4.10\": \"mambaforge-4.10.3-10\",\n \"mambaforge-22.9\": \"mambaforge-22.9.0-3\",\n },\n \"nodejs\": {\n \"14\": \"14.20.1\",\n \"16\": \"16.18.1\",\n \"18\": \"18.16.1\", # LTS\n \"19\": \"19.0.1\",\n \"20\": \"20.3.1\",\n },\n \"ruby\": {\n \"3.3\": \"3.3.0\",\n },\n \"rust\": {\n \"1.55\": \"1.55.0\",\n \"1.61\": \"1.61.0\",\n \"1.64\": \"1.64.0\",\n \"1.70\": \"1.70.0\",\n },\n \"golang\": {\n \"1.17\": \"1.17.13\",\n \"1.18\": \"1.18.10\",\n \"1.19\": \"1.19.10\",\n \"1.20\": \"1.20.5\",\n },\n },\n}\n",
"path": "readthedocs/builds/constants_docker.py"
}
] | diff --git a/docs/user/config-file/v2.rst b/docs/user/config-file/v2.rst
index b5538ac396a..3d23033fac2 100644
--- a/docs/user/config-file/v2.rst
+++ b/docs/user/config-file/v2.rst
@@ -283,7 +283,7 @@ build.tools
Version specifiers for each tool. It must contain at least one tool.
:Type: ``dict``
-:Options: ``python``, ``nodejs``, ``rust``, ``golang``
+:Options: ``python``, ``nodejs``, ``ruby``, ``rust``, ``golang``
:Required: ``true``
build.tools.python
@@ -326,6 +326,15 @@ Node.js version to use.
- ``19``
- ``20``
+build.tools.ruby
+````````````````
+
+Ruby version to use.
+
+:Type: ``string``
+:Options:
+ - ``3.3``
+
build.tools.rust
````````````````
diff --git a/readthedocs/builds/constants_docker.py b/readthedocs/builds/constants_docker.py
index daad082dd5b..dd4bef2c706 100644
--- a/readthedocs/builds/constants_docker.py
+++ b/readthedocs/builds/constants_docker.py
@@ -50,6 +50,9 @@
"19": "19.0.1",
"20": "20.3.1",
},
+ "ruby": {
+ "3.3": "3.3.0",
+ },
"rust": {
"1.55": "1.55.0",
"1.61": "1.61.0",
|
PaddlePaddle__PaddleSpeech-1398 | ERROR: ImportError: cannot import name '__version__' from 'paddlespeech'
Hi there, when I execute the demo command:
`(/home/berg/PaddleSpeech/tools/venvs) root@bergtts:~/PaddleSpeech# paddlespeech tts --input "你好,欢迎使用飞桨深度学习框架!" --output output.wav`
it fails with the following error.
```
Traceback (most recent call last):
File "/home/berg/PaddleSpeech/tools/venvs/bin/paddlespeech", line 33, in <module>
sys.exit(load_entry_point('paddlespeech', 'console_scripts', 'paddlespeech')())
File "/home/berg/PaddleSpeech/tools/venvs/bin/paddlespeech", line 25, in importlib_load_entry_point
return next(matches).load()
File "/home/berg/PaddleSpeech/tools/venvs/lib/python3.7/site-packages/importlib_metadata/__init__.py", line 167, in load
module = import_module(match.group('module'))
File "/home/berg/PaddleSpeech/tools/venvs/lib/python3.7/importlib/__init__.py", line 127, in import_module
return _bootstrap._gcd_import(name[level:], package, level)
File "<frozen importlib._bootstrap>", line 1006, in _gcd_import
File "<frozen importlib._bootstrap>", line 983, in _find_and_load
File "<frozen importlib._bootstrap>", line 953, in _find_and_load_unlocked
File "<frozen importlib._bootstrap>", line 219, in _call_with_frames_removed
File "<frozen importlib._bootstrap>", line 1006, in _gcd_import
File "<frozen importlib._bootstrap>", line 983, in _find_and_load
File "<frozen importlib._bootstrap>", line 967, in _find_and_load_unlocked
File "<frozen importlib._bootstrap>", line 677, in _load_unlocked
File "<frozen importlib._bootstrap_external>", line 728, in exec_module
File "<frozen importlib._bootstrap>", line 219, in _call_with_frames_removed
File "/home/berg/PaddleSpeech/paddlespeech/cli/__init__.py", line 16, in <module>
from .asr import ASRExecutor
File "/home/berg/PaddleSpeech/paddlespeech/cli/asr/__init__.py", line 14, in <module>
from .infer import ASRExecutor
File "/home/berg/PaddleSpeech/paddlespeech/cli/asr/infer.py", line 30, in <module>
from ..utils import cli_register
File "/home/berg/PaddleSpeech/paddlespeech/cli/utils.py", line 33, in <module>
from .. import __version__
ImportError: cannot import name '__version__' from 'paddlespeech' (/home/berg/PaddleSpeech/paddlespeech/__init__.py)
```
After installing Miniconda on Ubuntu 18.04.3, I switched to root and went on to create the venv and run the pip installs...
`#conda create -y -p tools/venvs python=3.7 `
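
A minimal, standalone sketch of the guarded-import pattern the fix introduces (see the diff below). Note that the recorded patch assigns `__version__ = 0.0.0` without quotes, which is itself a `SyntaxError`; the fallback needs to be a string literal:

```python
# Guarded version import: fall back to a placeholder when the package
# metadata is unavailable (e.g. running from a source checkout).
try:
    from paddlespeech import __version__
except ImportError:  # also catches ModuleNotFoundError
    __version__ = "0.0.0"  # string literal, unlike the bare 0.0.0 in the patch

print(__version__)
```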
| [
{
"content": "# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport hashlib\nimport inspect\nimport json\nimport os\nimport tarfile\nimport threading\nimport time\nimport uuid\nimport zipfile\nfrom typing import Any\nfrom typing import Dict\n\nimport paddle\nimport requests\nimport yaml\nfrom paddle.framework import load\n\nimport paddleaudio\nfrom . import download\nfrom .. import __version__\nfrom .entry import commands\n\nrequests.adapters.DEFAULT_RETRIES = 3\n\n__all__ = [\n 'cli_register',\n 'get_command',\n 'download_and_decompress',\n 'load_state_dict_from_url',\n 'stats_wrapper',\n]\n\n\ndef cli_register(name: str, description: str='') -> Any:\n def _warpper(command):\n items = name.split('.')\n\n com = commands\n for item in items:\n com = com[item]\n com['_entry'] = command\n if description:\n com['_description'] = description\n return command\n\n return _warpper\n\n\ndef get_command(name: str) -> Any:\n items = name.split('.')\n com = commands\n for item in items:\n com = com[item]\n\n return com['_entry']\n\n\ndef _get_uncompress_path(filepath: os.PathLike) -> os.PathLike:\n file_dir = os.path.dirname(filepath)\n is_zip_file = False\n if tarfile.is_tarfile(filepath):\n files = tarfile.open(filepath, \"r:*\")\n file_list = files.getnames()\n elif zipfile.is_zipfile(filepath):\n files = zipfile.ZipFile(filepath, 'r')\n file_list = files.namelist()\n is_zip_file = True\n else:\n return file_dir\n\n if download._is_a_single_file(file_list):\n rootpath = file_list[0]\n uncompressed_path = os.path.join(file_dir, rootpath)\n elif download._is_a_single_dir(file_list):\n if is_zip_file:\n rootpath = os.path.splitext(file_list[0])[0].split(os.sep)[0]\n else:\n rootpath = os.path.splitext(file_list[0])[0].split(os.sep)[-1]\n uncompressed_path = os.path.join(file_dir, rootpath)\n else:\n rootpath = os.path.splitext(filepath)[0].split(os.sep)[-1]\n uncompressed_path = os.path.join(file_dir, rootpath)\n\n files.close()\n return uncompressed_path\n\n\ndef download_and_decompress(archive: Dict[str, str], path: str) -> os.PathLike:\n \"\"\"\n Download archieves and decompress to specific path.\n \"\"\"\n if not os.path.isdir(path):\n os.makedirs(path)\n\n assert 'url' in archive and 'md5' in archive, \\\n 'Dictionary keys of \"url\" and \"md5\" are required in the archive, but got: {}'.format(list(archive.keys()))\n\n filepath = os.path.join(path, os.path.basename(archive['url']))\n if os.path.isfile(filepath) and download._md5check(filepath,\n archive['md5']):\n uncompress_path = _get_uncompress_path(filepath)\n if not os.path.isdir(uncompress_path):\n download._decompress(filepath)\n else:\n StatsWorker(\n task='download',\n version=__version__,\n extra_info={\n 'download_url': archive['url'],\n 'paddle_version': paddle.__version__\n }).start()\n uncompress_path = download.get_path_from_url(archive['url'], path,\n archive['md5'])\n\n return uncompress_path\n\n\ndef load_state_dict_from_url(url: str, path: str, md5: str=None) -> 
os.PathLike:\n \"\"\"\n Download and load a state dict from url\n \"\"\"\n if not os.path.isdir(path):\n os.makedirs(path)\n\n download.get_path_from_url(url, path, md5)\n return load(os.path.join(path, os.path.basename(url)))\n\n\ndef _get_user_home():\n return os.path.expanduser('~')\n\n\ndef _get_paddlespcceh_home():\n if 'PPSPEECH_HOME' in os.environ:\n home_path = os.environ['PPSPEECH_HOME']\n if os.path.exists(home_path):\n if os.path.isdir(home_path):\n return home_path\n else:\n raise RuntimeError(\n 'The environment variable PPSPEECH_HOME {} is not a directory.'.\n format(home_path))\n else:\n return home_path\n return os.path.join(_get_user_home(), '.paddlespeech')\n\n\ndef _get_sub_home(directory):\n home = os.path.join(_get_paddlespcceh_home(), directory)\n if not os.path.exists(home):\n os.makedirs(home)\n return home\n\n\nPPSPEECH_HOME = _get_paddlespcceh_home()\nMODEL_HOME = _get_sub_home('models')\nCONF_HOME = _get_sub_home('conf')\n\n\ndef _md5(text: str):\n '''Calculate the md5 value of the input text.'''\n md5code = hashlib.md5(text.encode())\n return md5code.hexdigest()\n\n\nclass ConfigCache:\n def __init__(self):\n self._data = {}\n self._initialize()\n self.file = os.path.join(CONF_HOME, 'cache.yaml')\n if not os.path.exists(self.file):\n self.flush()\n return\n\n with open(self.file, 'r') as file:\n try:\n cfg = yaml.load(file, Loader=yaml.FullLoader)\n self._data.update(cfg)\n except:\n self.flush()\n\n @property\n def cache_info(self):\n return self._data['cache_info']\n\n def _initialize(self):\n # Set default configuration values.\n cache_info = _md5(str(uuid.uuid1())[-12:]) + \"-\" + str(int(time.time()))\n self._data['cache_info'] = cache_info\n\n def flush(self):\n '''Flush the current configuration into the configuration file.'''\n with open(self.file, 'w') as file:\n cfg = json.loads(json.dumps(self._data))\n yaml.dump(cfg, file)\n\n\nstats_api = \"http://paddlepaddle.org.cn/paddlehub/stat\"\ncache_info = ConfigCache().cache_info\n\n\nclass StatsWorker(threading.Thread):\n def __init__(self,\n task=\"asr\",\n model=None,\n version=__version__,\n extra_info={}):\n threading.Thread.__init__(self)\n self._task = task\n self._model = model\n self._version = version\n self._extra_info = extra_info\n\n def run(self):\n params = {\n 'task': self._task,\n 'version': self._version,\n 'from': 'ppspeech'\n }\n if self._model:\n params['model'] = self._model\n\n self._extra_info.update({\n 'cache_info': cache_info,\n })\n params.update({\"extra\": json.dumps(self._extra_info)})\n\n try:\n requests.get(stats_api, params)\n except Exception:\n pass\n\n return\n\n\ndef _note_one_stat(cls_name, params={}):\n task = cls_name.replace('Executor', '').lower() # XXExecutor\n extra_info = {\n 'paddle_version': paddle.__version__,\n }\n\n if 'model' in params:\n model = params['model']\n else:\n model = None\n\n if 'audio_file' in params:\n try:\n _, sr = paddleaudio.load(params['audio_file'])\n except Exception:\n sr = -1\n\n if task == 'asr':\n extra_info.update({\n 'lang': params['lang'],\n 'inp_sr': sr,\n 'model_sr': params['sample_rate'],\n })\n elif task == 'st':\n extra_info.update({\n 'lang':\n params['src_lang'] + '-' + params['tgt_lang'],\n 'inp_sr':\n sr,\n 'model_sr':\n params['sample_rate'],\n })\n elif task == 'tts':\n model = params['am']\n extra_info.update({\n 'lang': params['lang'],\n 'vocoder': params['voc'],\n })\n elif task == 'cls':\n extra_info.update({\n 'inp_sr': sr,\n })\n elif task == 'text':\n extra_info.update({\n 'sub_task': params['task'],\n 'lang': 
params['lang'],\n })\n else:\n return\n\n StatsWorker(\n task=task,\n model=model,\n version=__version__,\n extra_info=extra_info, ).start()\n\n\ndef _parse_args(func, *args, **kwargs):\n # FullArgSpec(args, varargs, varkw, defaults, kwonlyargs, kwonlydefaults, annotations)\n argspec = inspect.getfullargspec(func)\n\n keys = argspec[0]\n if keys[0] == 'self': # Remove self pointer.\n keys = keys[1:]\n\n default_values = argspec[3]\n values = [None] * (len(keys) - len(default_values))\n values.extend(list(default_values))\n params = dict(zip(keys, values))\n\n for idx, v in enumerate(args):\n params[keys[idx]] = v\n for k, v in kwargs.items():\n params[k] = v\n\n return params\n\n\ndef stats_wrapper(executor_func):\n def _warpper(self, *args, **kwargs):\n try:\n _note_one_stat(\n type(self).__name__, _parse_args(executor_func, *args,\n **kwargs))\n except Exception:\n pass\n return executor_func(self, *args, **kwargs)\n\n return _warpper\n",
"path": "paddlespeech/cli/utils.py"
}
] | [
{
"content": "# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport hashlib\nimport inspect\nimport json\nimport os\nimport tarfile\nimport threading\nimport time\nimport uuid\nimport zipfile\nfrom typing import Any\nfrom typing import Dict\n\nimport paddle\nimport requests\nimport yaml\nfrom paddle.framework import load\n\nimport paddleaudio\nfrom . import download\nfrom .entry import commands\ntry:\n from .. import __version__\nexcept ImportError:\n __version__ = 0.0.0 # for develop branch\n\nrequests.adapters.DEFAULT_RETRIES = 3\n\n__all__ = [\n 'cli_register',\n 'get_command',\n 'download_and_decompress',\n 'load_state_dict_from_url',\n 'stats_wrapper',\n]\n\n\ndef cli_register(name: str, description: str='') -> Any:\n def _warpper(command):\n items = name.split('.')\n\n com = commands\n for item in items:\n com = com[item]\n com['_entry'] = command\n if description:\n com['_description'] = description\n return command\n\n return _warpper\n\n\ndef get_command(name: str) -> Any:\n items = name.split('.')\n com = commands\n for item in items:\n com = com[item]\n\n return com['_entry']\n\n\ndef _get_uncompress_path(filepath: os.PathLike) -> os.PathLike:\n file_dir = os.path.dirname(filepath)\n is_zip_file = False\n if tarfile.is_tarfile(filepath):\n files = tarfile.open(filepath, \"r:*\")\n file_list = files.getnames()\n elif zipfile.is_zipfile(filepath):\n files = zipfile.ZipFile(filepath, 'r')\n file_list = files.namelist()\n is_zip_file = True\n else:\n return file_dir\n\n if download._is_a_single_file(file_list):\n rootpath = file_list[0]\n uncompressed_path = os.path.join(file_dir, rootpath)\n elif download._is_a_single_dir(file_list):\n if is_zip_file:\n rootpath = os.path.splitext(file_list[0])[0].split(os.sep)[0]\n else:\n rootpath = os.path.splitext(file_list[0])[0].split(os.sep)[-1]\n uncompressed_path = os.path.join(file_dir, rootpath)\n else:\n rootpath = os.path.splitext(filepath)[0].split(os.sep)[-1]\n uncompressed_path = os.path.join(file_dir, rootpath)\n\n files.close()\n return uncompressed_path\n\n\ndef download_and_decompress(archive: Dict[str, str], path: str) -> os.PathLike:\n \"\"\"\n Download archieves and decompress to specific path.\n \"\"\"\n if not os.path.isdir(path):\n os.makedirs(path)\n\n assert 'url' in archive and 'md5' in archive, \\\n 'Dictionary keys of \"url\" and \"md5\" are required in the archive, but got: {}'.format(list(archive.keys()))\n\n filepath = os.path.join(path, os.path.basename(archive['url']))\n if os.path.isfile(filepath) and download._md5check(filepath,\n archive['md5']):\n uncompress_path = _get_uncompress_path(filepath)\n if not os.path.isdir(uncompress_path):\n download._decompress(filepath)\n else:\n StatsWorker(\n task='download',\n version=__version__,\n extra_info={\n 'download_url': archive['url'],\n 'paddle_version': paddle.__version__\n }).start()\n uncompress_path = download.get_path_from_url(archive['url'], path,\n archive['md5'])\n\n return 
uncompress_path\n\n\ndef load_state_dict_from_url(url: str, path: str, md5: str=None) -> os.PathLike:\n \"\"\"\n Download and load a state dict from url\n \"\"\"\n if not os.path.isdir(path):\n os.makedirs(path)\n\n download.get_path_from_url(url, path, md5)\n return load(os.path.join(path, os.path.basename(url)))\n\n\ndef _get_user_home():\n return os.path.expanduser('~')\n\n\ndef _get_paddlespcceh_home():\n if 'PPSPEECH_HOME' in os.environ:\n home_path = os.environ['PPSPEECH_HOME']\n if os.path.exists(home_path):\n if os.path.isdir(home_path):\n return home_path\n else:\n raise RuntimeError(\n 'The environment variable PPSPEECH_HOME {} is not a directory.'.\n format(home_path))\n else:\n return home_path\n return os.path.join(_get_user_home(), '.paddlespeech')\n\n\ndef _get_sub_home(directory):\n home = os.path.join(_get_paddlespcceh_home(), directory)\n if not os.path.exists(home):\n os.makedirs(home)\n return home\n\n\nPPSPEECH_HOME = _get_paddlespcceh_home()\nMODEL_HOME = _get_sub_home('models')\nCONF_HOME = _get_sub_home('conf')\n\n\ndef _md5(text: str):\n '''Calculate the md5 value of the input text.'''\n md5code = hashlib.md5(text.encode())\n return md5code.hexdigest()\n\n\nclass ConfigCache:\n def __init__(self):\n self._data = {}\n self._initialize()\n self.file = os.path.join(CONF_HOME, 'cache.yaml')\n if not os.path.exists(self.file):\n self.flush()\n return\n\n with open(self.file, 'r') as file:\n try:\n cfg = yaml.load(file, Loader=yaml.FullLoader)\n self._data.update(cfg)\n except:\n self.flush()\n\n @property\n def cache_info(self):\n return self._data['cache_info']\n\n def _initialize(self):\n # Set default configuration values.\n cache_info = _md5(str(uuid.uuid1())[-12:]) + \"-\" + str(int(time.time()))\n self._data['cache_info'] = cache_info\n\n def flush(self):\n '''Flush the current configuration into the configuration file.'''\n with open(self.file, 'w') as file:\n cfg = json.loads(json.dumps(self._data))\n yaml.dump(cfg, file)\n\n\nstats_api = \"http://paddlepaddle.org.cn/paddlehub/stat\"\ncache_info = ConfigCache().cache_info\n\n\nclass StatsWorker(threading.Thread):\n def __init__(self,\n task=\"asr\",\n model=None,\n version=__version__,\n extra_info={}):\n threading.Thread.__init__(self)\n self._task = task\n self._model = model\n self._version = version\n self._extra_info = extra_info\n\n def run(self):\n params = {\n 'task': self._task,\n 'version': self._version,\n 'from': 'ppspeech'\n }\n if self._model:\n params['model'] = self._model\n\n self._extra_info.update({\n 'cache_info': cache_info,\n })\n params.update({\"extra\": json.dumps(self._extra_info)})\n\n try:\n requests.get(stats_api, params)\n except Exception:\n pass\n\n return\n\n\ndef _note_one_stat(cls_name, params={}):\n task = cls_name.replace('Executor', '').lower() # XXExecutor\n extra_info = {\n 'paddle_version': paddle.__version__,\n }\n\n if 'model' in params:\n model = params['model']\n else:\n model = None\n\n if 'audio_file' in params:\n try:\n _, sr = paddleaudio.load(params['audio_file'])\n except Exception:\n sr = -1\n\n if task == 'asr':\n extra_info.update({\n 'lang': params['lang'],\n 'inp_sr': sr,\n 'model_sr': params['sample_rate'],\n })\n elif task == 'st':\n extra_info.update({\n 'lang':\n params['src_lang'] + '-' + params['tgt_lang'],\n 'inp_sr':\n sr,\n 'model_sr':\n params['sample_rate'],\n })\n elif task == 'tts':\n model = params['am']\n extra_info.update({\n 'lang': params['lang'],\n 'vocoder': params['voc'],\n })\n elif task == 'cls':\n extra_info.update({\n 'inp_sr': 
sr,\n })\n elif task == 'text':\n extra_info.update({\n 'sub_task': params['task'],\n 'lang': params['lang'],\n })\n else:\n return\n\n StatsWorker(\n task=task,\n model=model,\n version=__version__,\n extra_info=extra_info, ).start()\n\n\ndef _parse_args(func, *args, **kwargs):\n # FullArgSpec(args, varargs, varkw, defaults, kwonlyargs, kwonlydefaults, annotations)\n argspec = inspect.getfullargspec(func)\n\n keys = argspec[0]\n if keys[0] == 'self': # Remove self pointer.\n keys = keys[1:]\n\n default_values = argspec[3]\n values = [None] * (len(keys) - len(default_values))\n values.extend(list(default_values))\n params = dict(zip(keys, values))\n\n for idx, v in enumerate(args):\n params[keys[idx]] = v\n for k, v in kwargs.items():\n params[k] = v\n\n return params\n\n\ndef stats_wrapper(executor_func):\n def _warpper(self, *args, **kwargs):\n try:\n _note_one_stat(\n type(self).__name__, _parse_args(executor_func, *args,\n **kwargs))\n except Exception:\n pass\n return executor_func(self, *args, **kwargs)\n\n return _warpper\n",
"path": "paddlespeech/cli/utils.py"
}
] | diff --git a/paddlespeech/cli/utils.py b/paddlespeech/cli/utils.py
index d11178df8e6..4f2c8906599 100644
--- a/paddlespeech/cli/utils.py
+++ b/paddlespeech/cli/utils.py
@@ -30,8 +30,11 @@
import paddleaudio
from . import download
-from .. import __version__
from .entry import commands
+try:
+ from .. import __version__
+except ImportError:
+ __version__ = 0.0.0 # for develop branch
requests.adapters.DEFAULT_RETRIES = 3
|
pydantic__pydantic-4329 | Serialization -> de-serialisation fails for small timedelta (< 100 microseconds)
### Checks
* [x] I added a descriptive title to this issue
* [x] I have searched (google, github) for similar issues and couldn't find anything
* [x] I have read and followed [the docs](https://pydantic-docs.helpmanual.io/) and still think this is a bug
# Bug
Serializing and then de-serializing a model with a small `timedelta` raises a `ValidationError`. The de-serialization fails only when the `timedelta` is below 100 microseconds; see the following example:
```py
from datetime import timedelta
from pydantic import BaseModel
class Model(BaseModel):
duration: timedelta
# This works
model = Model(duration=timedelta(microseconds=100))
Model.parse_raw(model.json())
# This Fails
model = Model(duration=timedelta(microseconds=99))
Model.parse_raw(model.json())
```
Last line throws the following error:
```py
pydantic.error_wrappers.ValidationError: 1 validation error for Model
duration
invalid duration format (type=value_error.duration)
```
I believe the error comes from the `parse_duration` function, and in particular the line where the input value is converted to `str`.
https://github.com/samuelcolvin/pydantic/blob/c256dccbb383a7fd462f62fcb5d55558eb3cb108/pydantic/datetime_parse.py#L226-L231
Indeed `str(0.0001)` gives `"0.0001"` but `str(0.000099)` gives `"9.9e-05"`, so the `re.match` fails.
Changing `value = str(value)` to `value = f"{value:.6f}"` should fix this. I would be happy to create a PR to solve the issue.
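
A short demonstration of the failure mode and of the fix that was merged (`f'{value:f}'`, visible in the pr_diff below); any fixed-precision format avoids scientific notation:

```python
# str() switches to scientific notation for small floats, breaking the
# duration regex; a fixed-precision format string keeps plain decimals.
value = 9.9e-05  # timedelta(microseconds=99).total_seconds()
assert str(value) == "9.9e-05"     # scientific notation: regex match fails
assert f"{value:f}" == "0.000099"  # plain decimal: standard_duration_re matches
```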
# System information
Output of `python -c "import pydantic.utils; print(pydantic.utils.version_info())"`:
```
pydantic version: 1.8.2
pydantic compiled: True
install path: <my-home>/.pyenv/versions/3.7.11/envs/pydantic/lib/python3.7/site-packages/pydantic
python version: 3.7.11 (default, Aug 31 2021, 20:43:02) [Clang 10.0.1 (clang-1001.0.46.4)]
platform: Darwin-19.6.0-x86_64-i386-64bit
optional deps. installed: ['typing-extensions']
```
| [
{
"content": "\"\"\"\nFunctions to parse datetime objects.\n\nWe're using regular expressions rather than time.strptime because:\n- They provide both validation and parsing.\n- They're more flexible for datetimes.\n- The date/datetime/time constructors produce friendlier error messages.\n\nStolen from https://raw.githubusercontent.com/django/django/master/django/utils/dateparse.py at\n9718fa2e8abe430c3526a9278dd976443d4ae3c6\n\nChanged to:\n* use standard python datetime types not django.utils.timezone\n* raise ValueError when regex doesn't match rather than returning None\n* support parsing unix timestamps for dates and datetimes\n\"\"\"\nimport re\nfrom datetime import date, datetime, time, timedelta, timezone\nfrom typing import Dict, Optional, Type, Union\n\nfrom . import errors\n\ndate_expr = r'(?P<year>\\d{4})-(?P<month>\\d{1,2})-(?P<day>\\d{1,2})'\ntime_expr = (\n r'(?P<hour>\\d{1,2}):(?P<minute>\\d{1,2})'\n r'(?::(?P<second>\\d{1,2})(?:\\.(?P<microsecond>\\d{1,6})\\d{0,6})?)?'\n r'(?P<tzinfo>Z|[+-]\\d{2}(?::?\\d{2})?)?$'\n)\n\ndate_re = re.compile(f'{date_expr}$')\ntime_re = re.compile(time_expr)\ndatetime_re = re.compile(f'{date_expr}[T ]{time_expr}')\n\nstandard_duration_re = re.compile(\n r'^'\n r'(?:(?P<days>-?\\d+) (days?, )?)?'\n r'((?:(?P<hours>-?\\d+):)(?=\\d+:\\d+))?'\n r'(?:(?P<minutes>-?\\d+):)?'\n r'(?P<seconds>-?\\d+)'\n r'(?:\\.(?P<microseconds>\\d{1,6})\\d{0,6})?'\n r'$'\n)\n\n# Support the sections of ISO 8601 date representation that are accepted by timedelta\niso8601_duration_re = re.compile(\n r'^(?P<sign>[-+]?)'\n r'P'\n r'(?:(?P<days>\\d+(.\\d+)?)D)?'\n r'(?:T'\n r'(?:(?P<hours>\\d+(.\\d+)?)H)?'\n r'(?:(?P<minutes>\\d+(.\\d+)?)M)?'\n r'(?:(?P<seconds>\\d+(.\\d+)?)S)?'\n r')?'\n r'$'\n)\n\nEPOCH = datetime(1970, 1, 1)\n# if greater than this, the number is in ms, if less than or equal it's in seconds\n# (in seconds this is 11th October 2603, in ms it's 20th August 1970)\nMS_WATERSHED = int(2e10)\n# slightly more than datetime.max in ns - (datetime.max - EPOCH).total_seconds() * 1e9\nMAX_NUMBER = int(3e20)\nStrBytesIntFloat = Union[str, bytes, int, float]\n\n\ndef get_numeric(value: StrBytesIntFloat, native_expected_type: str) -> Union[None, int, float]:\n if isinstance(value, (int, float)):\n return value\n try:\n return float(value)\n except ValueError:\n return None\n except TypeError:\n raise TypeError(f'invalid type; expected {native_expected_type}, string, bytes, int or float')\n\n\ndef from_unix_seconds(seconds: Union[int, float]) -> datetime:\n if seconds > MAX_NUMBER:\n return datetime.max\n elif seconds < -MAX_NUMBER:\n return datetime.min\n\n while abs(seconds) > MS_WATERSHED:\n seconds /= 1000\n dt = EPOCH + timedelta(seconds=seconds)\n return dt.replace(tzinfo=timezone.utc)\n\n\ndef _parse_timezone(value: Optional[str], error: Type[Exception]) -> Union[None, int, timezone]:\n if value == 'Z':\n return timezone.utc\n elif value is not None:\n offset_mins = int(value[-2:]) if len(value) > 3 else 0\n offset = 60 * int(value[1:3]) + offset_mins\n if value[0] == '-':\n offset = -offset\n try:\n return timezone(timedelta(minutes=offset))\n except ValueError:\n raise error()\n else:\n return None\n\n\ndef parse_date(value: Union[date, StrBytesIntFloat]) -> date:\n \"\"\"\n Parse a date/int/float/string and return a datetime.date.\n\n Raise ValueError if the input is well formatted but not a valid date.\n Raise ValueError if the input isn't well formatted.\n \"\"\"\n if isinstance(value, date):\n if isinstance(value, datetime):\n return value.date()\n else:\n 
return value\n\n number = get_numeric(value, 'date')\n if number is not None:\n return from_unix_seconds(number).date()\n\n if isinstance(value, bytes):\n value = value.decode()\n\n match = date_re.match(value) # type: ignore\n if match is None:\n raise errors.DateError()\n\n kw = {k: int(v) for k, v in match.groupdict().items()}\n\n try:\n return date(**kw)\n except ValueError:\n raise errors.DateError()\n\n\ndef parse_time(value: Union[time, StrBytesIntFloat]) -> time:\n \"\"\"\n Parse a time/string and return a datetime.time.\n\n Raise ValueError if the input is well formatted but not a valid time.\n Raise ValueError if the input isn't well formatted, in particular if it contains an offset.\n \"\"\"\n if isinstance(value, time):\n return value\n\n number = get_numeric(value, 'time')\n if number is not None:\n if number >= 86400:\n # doesn't make sense since the time time loop back around to 0\n raise errors.TimeError()\n return (datetime.min + timedelta(seconds=number)).time()\n\n if isinstance(value, bytes):\n value = value.decode()\n\n match = time_re.match(value) # type: ignore\n if match is None:\n raise errors.TimeError()\n\n kw = match.groupdict()\n if kw['microsecond']:\n kw['microsecond'] = kw['microsecond'].ljust(6, '0')\n\n tzinfo = _parse_timezone(kw.pop('tzinfo'), errors.TimeError)\n kw_: Dict[str, Union[None, int, timezone]] = {k: int(v) for k, v in kw.items() if v is not None}\n kw_['tzinfo'] = tzinfo\n\n try:\n return time(**kw_) # type: ignore\n except ValueError:\n raise errors.TimeError()\n\n\ndef parse_datetime(value: Union[datetime, StrBytesIntFloat]) -> datetime:\n \"\"\"\n Parse a datetime/int/float/string and return a datetime.datetime.\n\n This function supports time zone offsets. When the input contains one,\n the output uses a timezone with a fixed offset from UTC.\n\n Raise ValueError if the input is well formatted but not a valid datetime.\n Raise ValueError if the input isn't well formatted.\n \"\"\"\n if isinstance(value, datetime):\n return value\n\n number = get_numeric(value, 'datetime')\n if number is not None:\n return from_unix_seconds(number)\n\n if isinstance(value, bytes):\n value = value.decode()\n\n match = datetime_re.match(value) # type: ignore\n if match is None:\n raise errors.DateTimeError()\n\n kw = match.groupdict()\n if kw['microsecond']:\n kw['microsecond'] = kw['microsecond'].ljust(6, '0')\n\n tzinfo = _parse_timezone(kw.pop('tzinfo'), errors.DateTimeError)\n kw_: Dict[str, Union[None, int, timezone]] = {k: int(v) for k, v in kw.items() if v is not None}\n kw_['tzinfo'] = tzinfo\n\n try:\n return datetime(**kw_) # type: ignore\n except ValueError:\n raise errors.DateTimeError()\n\n\ndef parse_duration(value: StrBytesIntFloat) -> timedelta:\n \"\"\"\n Parse a duration int/float/string and return a datetime.timedelta.\n\n The preferred format for durations in Django is '%d %H:%M:%S.%f'.\n\n Also supports ISO 8601 representation.\n \"\"\"\n if isinstance(value, timedelta):\n return value\n\n if isinstance(value, (int, float)):\n # below code requires a string\n value = str(value)\n elif isinstance(value, bytes):\n value = value.decode()\n\n try:\n match = standard_duration_re.match(value) or iso8601_duration_re.match(value)\n except TypeError:\n raise TypeError('invalid type; expected timedelta, string, bytes, int or float')\n\n if not match:\n raise errors.DurationError()\n\n kw = match.groupdict()\n sign = -1 if kw.pop('sign', '+') == '-' else 1\n if kw.get('microseconds'):\n kw['microseconds'] = kw['microseconds'].ljust(6, '0')\n\n if 
kw.get('seconds') and kw.get('microseconds') and kw['seconds'].startswith('-'):\n kw['microseconds'] = '-' + kw['microseconds']\n\n kw_ = {k: float(v) for k, v in kw.items() if v is not None}\n\n return sign * timedelta(**kw_)\n",
"path": "pydantic/datetime_parse.py"
}
] | [
{
"content": "\"\"\"\nFunctions to parse datetime objects.\n\nWe're using regular expressions rather than time.strptime because:\n- They provide both validation and parsing.\n- They're more flexible for datetimes.\n- The date/datetime/time constructors produce friendlier error messages.\n\nStolen from https://raw.githubusercontent.com/django/django/master/django/utils/dateparse.py at\n9718fa2e8abe430c3526a9278dd976443d4ae3c6\n\nChanged to:\n* use standard python datetime types not django.utils.timezone\n* raise ValueError when regex doesn't match rather than returning None\n* support parsing unix timestamps for dates and datetimes\n\"\"\"\nimport re\nfrom datetime import date, datetime, time, timedelta, timezone\nfrom typing import Dict, Optional, Type, Union\n\nfrom . import errors\n\ndate_expr = r'(?P<year>\\d{4})-(?P<month>\\d{1,2})-(?P<day>\\d{1,2})'\ntime_expr = (\n r'(?P<hour>\\d{1,2}):(?P<minute>\\d{1,2})'\n r'(?::(?P<second>\\d{1,2})(?:\\.(?P<microsecond>\\d{1,6})\\d{0,6})?)?'\n r'(?P<tzinfo>Z|[+-]\\d{2}(?::?\\d{2})?)?$'\n)\n\ndate_re = re.compile(f'{date_expr}$')\ntime_re = re.compile(time_expr)\ndatetime_re = re.compile(f'{date_expr}[T ]{time_expr}')\n\nstandard_duration_re = re.compile(\n r'^'\n r'(?:(?P<days>-?\\d+) (days?, )?)?'\n r'((?:(?P<hours>-?\\d+):)(?=\\d+:\\d+))?'\n r'(?:(?P<minutes>-?\\d+):)?'\n r'(?P<seconds>-?\\d+)'\n r'(?:\\.(?P<microseconds>\\d{1,6})\\d{0,6})?'\n r'$'\n)\n\n# Support the sections of ISO 8601 date representation that are accepted by timedelta\niso8601_duration_re = re.compile(\n r'^(?P<sign>[-+]?)'\n r'P'\n r'(?:(?P<days>\\d+(.\\d+)?)D)?'\n r'(?:T'\n r'(?:(?P<hours>\\d+(.\\d+)?)H)?'\n r'(?:(?P<minutes>\\d+(.\\d+)?)M)?'\n r'(?:(?P<seconds>\\d+(.\\d+)?)S)?'\n r')?'\n r'$'\n)\n\nEPOCH = datetime(1970, 1, 1)\n# if greater than this, the number is in ms, if less than or equal it's in seconds\n# (in seconds this is 11th October 2603, in ms it's 20th August 1970)\nMS_WATERSHED = int(2e10)\n# slightly more than datetime.max in ns - (datetime.max - EPOCH).total_seconds() * 1e9\nMAX_NUMBER = int(3e20)\nStrBytesIntFloat = Union[str, bytes, int, float]\n\n\ndef get_numeric(value: StrBytesIntFloat, native_expected_type: str) -> Union[None, int, float]:\n if isinstance(value, (int, float)):\n return value\n try:\n return float(value)\n except ValueError:\n return None\n except TypeError:\n raise TypeError(f'invalid type; expected {native_expected_type}, string, bytes, int or float')\n\n\ndef from_unix_seconds(seconds: Union[int, float]) -> datetime:\n if seconds > MAX_NUMBER:\n return datetime.max\n elif seconds < -MAX_NUMBER:\n return datetime.min\n\n while abs(seconds) > MS_WATERSHED:\n seconds /= 1000\n dt = EPOCH + timedelta(seconds=seconds)\n return dt.replace(tzinfo=timezone.utc)\n\n\ndef _parse_timezone(value: Optional[str], error: Type[Exception]) -> Union[None, int, timezone]:\n if value == 'Z':\n return timezone.utc\n elif value is not None:\n offset_mins = int(value[-2:]) if len(value) > 3 else 0\n offset = 60 * int(value[1:3]) + offset_mins\n if value[0] == '-':\n offset = -offset\n try:\n return timezone(timedelta(minutes=offset))\n except ValueError:\n raise error()\n else:\n return None\n\n\ndef parse_date(value: Union[date, StrBytesIntFloat]) -> date:\n \"\"\"\n Parse a date/int/float/string and return a datetime.date.\n\n Raise ValueError if the input is well formatted but not a valid date.\n Raise ValueError if the input isn't well formatted.\n \"\"\"\n if isinstance(value, date):\n if isinstance(value, datetime):\n return value.date()\n else:\n 
return value\n\n number = get_numeric(value, 'date')\n if number is not None:\n return from_unix_seconds(number).date()\n\n if isinstance(value, bytes):\n value = value.decode()\n\n match = date_re.match(value) # type: ignore\n if match is None:\n raise errors.DateError()\n\n kw = {k: int(v) for k, v in match.groupdict().items()}\n\n try:\n return date(**kw)\n except ValueError:\n raise errors.DateError()\n\n\ndef parse_time(value: Union[time, StrBytesIntFloat]) -> time:\n \"\"\"\n Parse a time/string and return a datetime.time.\n\n Raise ValueError if the input is well formatted but not a valid time.\n Raise ValueError if the input isn't well formatted, in particular if it contains an offset.\n \"\"\"\n if isinstance(value, time):\n return value\n\n number = get_numeric(value, 'time')\n if number is not None:\n if number >= 86400:\n # doesn't make sense since the time time loop back around to 0\n raise errors.TimeError()\n return (datetime.min + timedelta(seconds=number)).time()\n\n if isinstance(value, bytes):\n value = value.decode()\n\n match = time_re.match(value) # type: ignore\n if match is None:\n raise errors.TimeError()\n\n kw = match.groupdict()\n if kw['microsecond']:\n kw['microsecond'] = kw['microsecond'].ljust(6, '0')\n\n tzinfo = _parse_timezone(kw.pop('tzinfo'), errors.TimeError)\n kw_: Dict[str, Union[None, int, timezone]] = {k: int(v) for k, v in kw.items() if v is not None}\n kw_['tzinfo'] = tzinfo\n\n try:\n return time(**kw_) # type: ignore\n except ValueError:\n raise errors.TimeError()\n\n\ndef parse_datetime(value: Union[datetime, StrBytesIntFloat]) -> datetime:\n \"\"\"\n Parse a datetime/int/float/string and return a datetime.datetime.\n\n This function supports time zone offsets. When the input contains one,\n the output uses a timezone with a fixed offset from UTC.\n\n Raise ValueError if the input is well formatted but not a valid datetime.\n Raise ValueError if the input isn't well formatted.\n \"\"\"\n if isinstance(value, datetime):\n return value\n\n number = get_numeric(value, 'datetime')\n if number is not None:\n return from_unix_seconds(number)\n\n if isinstance(value, bytes):\n value = value.decode()\n\n match = datetime_re.match(value) # type: ignore\n if match is None:\n raise errors.DateTimeError()\n\n kw = match.groupdict()\n if kw['microsecond']:\n kw['microsecond'] = kw['microsecond'].ljust(6, '0')\n\n tzinfo = _parse_timezone(kw.pop('tzinfo'), errors.DateTimeError)\n kw_: Dict[str, Union[None, int, timezone]] = {k: int(v) for k, v in kw.items() if v is not None}\n kw_['tzinfo'] = tzinfo\n\n try:\n return datetime(**kw_) # type: ignore\n except ValueError:\n raise errors.DateTimeError()\n\n\ndef parse_duration(value: StrBytesIntFloat) -> timedelta:\n \"\"\"\n Parse a duration int/float/string and return a datetime.timedelta.\n\n The preferred format for durations in Django is '%d %H:%M:%S.%f'.\n\n Also supports ISO 8601 representation.\n \"\"\"\n if isinstance(value, timedelta):\n return value\n\n if isinstance(value, (int, float)):\n # below code requires a string\n value = f'{value:f}'\n elif isinstance(value, bytes):\n value = value.decode()\n\n try:\n match = standard_duration_re.match(value) or iso8601_duration_re.match(value)\n except TypeError:\n raise TypeError('invalid type; expected timedelta, string, bytes, int or float')\n\n if not match:\n raise errors.DurationError()\n\n kw = match.groupdict()\n sign = -1 if kw.pop('sign', '+') == '-' else 1\n if kw.get('microseconds'):\n kw['microseconds'] = kw['microseconds'].ljust(6, '0')\n\n if 
kw.get('seconds') and kw.get('microseconds') and kw['seconds'].startswith('-'):\n kw['microseconds'] = '-' + kw['microseconds']\n\n kw_ = {k: float(v) for k, v in kw.items() if v is not None}\n\n return sign * timedelta(**kw_)\n",
"path": "pydantic/datetime_parse.py"
}
] | diff --git a/changes/3315-samuelcolvin.md b/changes/3315-samuelcolvin.md
new file mode 100644
index 00000000000..47f38c5b6c0
--- /dev/null
+++ b/changes/3315-samuelcolvin.md
@@ -0,0 +1 @@
+Fix parsing of very small numeric timedelta values
diff --git a/pydantic/datetime_parse.py b/pydantic/datetime_parse.py
index 024a899d029..b642f17bbd4 100644
--- a/pydantic/datetime_parse.py
+++ b/pydantic/datetime_parse.py
@@ -223,7 +223,7 @@ def parse_duration(value: StrBytesIntFloat) -> timedelta:
if isinstance(value, (int, float)):
# below code requires a string
- value = str(value)
+ value = f'{value:f}'
elif isinstance(value, bytes):
value = value.decode()
diff --git a/tests/test_datetime_parse.py b/tests/test_datetime_parse.py
index f714d6667d8..004b161c0ef 100644
--- a/tests/test_datetime_parse.py
+++ b/tests/test_datetime_parse.py
@@ -175,6 +175,7 @@ def test_parse_python_format(delta):
('30', timedelta(seconds=30)),
(30, timedelta(seconds=30)),
(30.1, timedelta(seconds=30, milliseconds=100)),
+ (9.9e-05, timedelta(microseconds=99)),
# minutes seconds
('15:30', timedelta(minutes=15, seconds=30)),
('5:30', timedelta(minutes=5, seconds=30)),
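
The diff above is tiny but the failure mode is easy to miss: Python's `str()` renders very small floats in scientific notation, which the duration regexes in `datetime_parse` cannot match, while the `:f` format spec always produces fixed-point notation. A quick sketch, using pydantic v1's `datetime_parse` module as shown in the diff:

```python
from datetime import timedelta

from pydantic.datetime_parse import parse_duration  # pydantic v1 module from the diff

value = 9.9e-05
print(str(value))    # '9.9e-05'  -> scientific notation; the duration regexes can't match it
print(f'{value:f}')  # '0.000099' -> fixed-point notation; parsed as 99 microseconds

# Matches the test case added in the diff; passes once the fix is applied.
assert parse_duration(value) == timedelta(microseconds=99)
```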
|
Flexget__Flexget-1636 | Trailing slashes in API
### Expected behaviour:
API URLs should work both with and without trailing slashes (for example `/api/auth/login` and `/api/auth/login/`)
### Actual behaviour:
Only the URLs with trailing slashes work; the others throw 500 errors (the debug logs are consumed by cherrypy)
### Steps to reproduce:
- Step 1: Setup web server
- Step 2: Call endpoint without trailing slash
#### Config:
```
web_server: yes
```
#### Log:
```
2017-01-16 15:55 ERROR cherrypy.error [16/Jan/2017:15:55:04] ENGINE FormDataRoutingRedirect('A request was sent to this URL (http://localhost:5050/api/auth/login) but a redirect was issued automatically by the routing system to "http://localhost:5050/api/auth/login/". The URL was defined with a trailing slash so Flask will automatically redirect to the URL with the trailing slash if it was accessed without one. Make sure to directly send your POST-request to this URL since we can\'t make browsers or HTTP clients redirect with form data reliably or without user interaction.\n\nNote: this exception is only raised in debug mode',)
Traceback (most recent call last):
File "/usr/local/lib/python2.7/dist-packages/cherrypy/wsgiserver/__init__.py", line 1408, in communicate
req.respond()
File "/usr/local/lib/python2.7/dist-packages/cherrypy/wsgiserver/__init__.py", line 862, in respond
self.server.gateway(self).respond()
File "/usr/local/lib/python2.7/dist-packages/cherrypy/wsgiserver/__init__.py", line 2335, in respond
response = self.req.server.wsgi_app(self.env, self.start_response)
File "/usr/local/lib/python2.7/dist-packages/cherrypy/_cptree.py", line 287, in __call__
return app(environ, start_response)
File "/usr/local/lib/python2.7/dist-packages/flask/app.py", line 1994, in __call__
return self.wsgi_app(environ, start_response)
File "/usr/local/lib/python2.7/dist-packages/flask/app.py", line 1985, in wsgi_app
response = self.handle_exception(e)
File "/usr/local/lib/python2.7/dist-packages/flask_restful/__init__.py", line 271, in error_router
return original_handler(e)
File "/usr/local/lib/python2.7/dist-packages/flask_cors/extension.py", line 161, in wrapped_function
return cors_after_request(app.make_response(f(*args, **kwargs)))
File "/usr/local/lib/python2.7/dist-packages/flask/app.py", line 1540, in handle_exception
reraise(exc_type, exc_value, tb)
File "/usr/local/lib/python2.7/dist-packages/flask/app.py", line 1982, in wsgi_app
response = self.full_dispatch_request()
File "/usr/local/lib/python2.7/dist-packages/flask/app.py", line 1614, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/usr/local/lib/python2.7/dist-packages/flask_restful/__init__.py", line 271, in error_router
return original_handler(e)
File "/usr/local/lib/python2.7/dist-packages/flask_cors/extension.py", line 161, in wrapped_function
return cors_after_request(app.make_response(f(*args, **kwargs)))
File "/usr/local/lib/python2.7/dist-packages/flask/app.py", line 1517, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "/usr/local/lib/python2.7/dist-packages/flask/app.py", line 1612, in full_dispatch_request
rv = self.dispatch_request()
File "/usr/local/lib/python2.7/dist-packages/flask/app.py", line 1590, in dispatch_request
self.raise_routing_exception(req)
File "/usr/local/lib/python2.7/dist-packages/flask/app.py", line 1576, in raise_routing_exception
raise FormDataRoutingRedirect(request)
FormDataRoutingRedirect: A request was sent to this URL (http://localhost:5050/api/auth/login) but a redirect was issued automatically by the routing system to "http://localhost:5050/api/auth/login/". The URL was defined with a trailing slash so Flask will automatically redirect to the URL with the trailing slash if it was accessed without one. Make sure to directly send your POST-request to this URL since we can't make browsers or HTTP clients redirect with form data reliably or without user interaction.
Note: this exception is only raised in debug mode
```
### Additional information:
- Flexget Version: latest dev (2.9.6.dev)
- Python Version: 2.7.6
- Installation method: Github
- OS and version: Ubuntu 14.04
CherryPy has a tool to solve this, but because we graft the Flask WSGI applications onto it, that tool isn't usable here. This should probably be resolved from inside the Flask api app itself.
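
For reference, the redirect is plain Flask/Werkzeug behaviour rather than anything Flexget adds. A minimal standalone sketch of the failure mode (the route mirrors the URL from the log above):

```python
from flask import Flask

app = Flask(__name__)

@app.route('/api/auth/login/', methods=['POST'])  # note the trailing slash
def login():
    return 'ok'

client = app.test_client()
print(client.post('/api/auth/login/').status_code)  # 200
print(client.post('/api/auth/login').status_code)   # 3xx redirect from the routing system
                                                    # (301 or 308 depending on Werkzeug version);
                                                    # in debug mode, the exception shown above
```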
| [
{
"content": "from __future__ import unicode_literals, division, absolute_import\nfrom builtins import * # noqa pylint: disable=unused-import, redefined-builtin\n\nimport json\nimport logging\nimport os\nimport re\nfrom collections import deque\nfrom functools import wraps\n\nfrom flask import Flask, request, jsonify, make_response\nfrom flask_compress import Compress\nfrom flask_cors import CORS\nfrom flask_restplus import Model, Api as RestPlusAPI\nfrom flask_restplus import Resource\nfrom flexget import manager\nfrom flexget.config_schema import process_config\nfrom flexget.utils.database import with_session\nfrom flexget.webserver import User\nfrom jsonschema import RefResolutionError\nfrom werkzeug.http import generate_etag\n\nfrom . import __path__\n\n__version__ = '1.1.2'\n\nlog = logging.getLogger('api')\n\n\nclass APIClient(object):\n \"\"\"\n This is an client which can be used as a more pythonic interface to the rest api.\n\n It skips http, and is only usable from within the running flexget process.\n \"\"\"\n\n def __init__(self):\n self.app = api_app.test_client()\n\n def __getattr__(self, item):\n return APIEndpoint('/api/' + item, self.get_endpoint)\n\n def get_endpoint(self, url, data=None, method=None):\n if method is None:\n method = 'POST' if data is not None else 'GET'\n auth_header = dict(Authorization='Token %s' % api_key())\n response = self.app.open(url, data=data, follow_redirects=True, method=method, headers=auth_header)\n result = json.loads(response.get_data(as_text=True))\n # TODO: Proper exceptions\n if 200 > response.status_code >= 300:\n raise Exception(result['error'])\n return result\n\n\nclass APIEndpoint(object):\n def __init__(self, endpoint, caller):\n self.endpoint = endpoint\n self.caller = caller\n\n def __getattr__(self, item):\n return self.__class__(self.endpoint + '/' + item, self.caller)\n\n __getitem__ = __getattr__\n\n def __call__(self, data=None, method=None):\n return self.caller(self.endpoint, data=data, method=method)\n\n\ndef api_version(f):\n \"\"\" Add the 'API-Version' header to all responses \"\"\"\n\n @wraps(f)\n def wrapped(*args, **kwargs):\n rv = f(*args, **kwargs)\n rv.headers['API-Version'] = __version__\n return rv\n\n return wrapped\n\n\nclass APIResource(Resource):\n \"\"\"All api resources should subclass this class.\"\"\"\n method_decorators = [with_session, api_version]\n\n def __init__(self, api, *args, **kwargs):\n self.manager = manager.manager\n super(APIResource, self).__init__(api, *args, **kwargs)\n\n\nclass APISchemaModel(Model):\n \"\"\"A flask restplus :class:`flask_restplus.models.ApiModel` which can take a json schema directly.\"\"\"\n\n def __init__(self, name, schema, *args, **kwargs):\n super(APISchemaModel, self).__init__(name, *args, **kwargs)\n self._schema = schema\n\n @property\n def __schema__(self):\n if self.__parent__:\n return {\n 'allOf': [\n {'$ref': '#/definitions/{0}'.format(self.__parent__.name)},\n self._schema\n ]\n }\n else:\n return self._schema\n\n def __nonzero__(self):\n return bool(self._schema)\n\n def __bool__(self):\n return self._schema is not None\n\n def __repr__(self):\n return '<ApiSchemaModel(%r)>' % self._schema\n\n\nclass API(RestPlusAPI):\n \"\"\"\n Extends a flask restplus :class:`flask_restplus.Api` with:\n - methods to make using json schemas easier\n - methods to auto document and handle :class:`ApiError` responses\n \"\"\"\n\n def _rewrite_refs(self, schema):\n if isinstance(schema, list):\n for value in schema:\n self._rewrite_refs(value)\n\n if isinstance(schema, 
dict):\n for key, value in schema.items():\n if isinstance(value, (list, dict)):\n self._rewrite_refs(value)\n\n if key == '$ref' and value.startswith('/'):\n schema[key] = '#definitions%s' % value\n\n def schema(self, name, schema, **kwargs):\n \"\"\"\n Register a json schema.\n\n Usable like :meth:`flask_restplus.Api.model`, except takes a json schema as its argument.\n\n :returns: An :class:`ApiSchemaModel` instance registered to this api.\n \"\"\"\n model = APISchemaModel(name, schema, **kwargs)\n model.__apidoc__.update(kwargs)\n self.models[name] = model\n return model\n\n def inherit(self, name, parent, fields):\n \"\"\"\n Extends :meth:`flask_restplus.Api.inherit` to allow `fields` to be a json schema, if `parent` is a\n :class:`ApiSchemaModel`.\n \"\"\"\n if isinstance(parent, APISchemaModel):\n model = APISchemaModel(name, fields)\n model.__apidoc__['name'] = name\n model.__parent__ = parent\n self.models[name] = model\n return model\n return super(API, self).inherit(name, parent, fields)\n\n def validate(self, model, schema_override=None, description=None):\n \"\"\"\n When a method is decorated with this, json data submitted to the endpoint will be validated with the given\n `model`. This also auto-documents the expected model, as well as the possible :class:`ValidationError` response.\n \"\"\"\n\n def decorator(func):\n @api.expect((model, description))\n @api.response(ValidationError)\n @wraps(func)\n def wrapper(*args, **kwargs):\n payload = request.json\n try:\n schema = schema_override if schema_override else model.__schema__\n errors = process_config(config=payload, schema=schema, set_defaults=False)\n\n if errors:\n raise ValidationError(errors)\n except RefResolutionError as e:\n raise APIError(str(e))\n return func(*args, **kwargs)\n\n return wrapper\n\n return decorator\n\n def response(self, code_or_apierror, description='Success', model=None, **kwargs):\n \"\"\"\n Extends :meth:`flask_restplus.Api.response` to allow passing an :class:`ApiError` class instead of\n response code. 
If an `ApiError` is used, the response code, and expected response model, is automatically\n documented.\n \"\"\"\n try:\n if issubclass(code_or_apierror, APIError):\n description = code_or_apierror.description or description\n return self.doc(\n responses={code_or_apierror.status_code: (description, code_or_apierror.response_model)}, **kwargs)\n except TypeError:\n # If first argument isn't a class this happens\n pass\n return super(API, self).response(code_or_apierror, description, model=model, **kwargs)\n\n def pagination_parser(self, parser=None, sort_choices=None, default=None, add_sort=None):\n \"\"\"\n Return a standardized pagination parser, to be used for any endpoint that has pagination.\n\n :param RequestParser parser: Can extend a given parser or create a new one\n :param tuple sort_choices: A tuple of strings, to be used as server side attribute searches\n :param str default: The default sort string, used `sort_choices[0]` if not given\n :param bool add_sort: Add sort order choices without adding specific sort choices\n\n :return: An api.parser() instance with pagination and sorting arguments.\n \"\"\"\n pagination = parser.copy() if parser else self.parser()\n pagination.add_argument('page', type=int, default=1, help='Page number')\n pagination.add_argument('per_page', type=int, default=50, help='Results per page')\n if sort_choices or add_sort:\n pagination.add_argument('order', choices=('desc', 'asc'), default='desc', help='Sorting order')\n if sort_choices:\n pagination.add_argument('sort_by', choices=sort_choices, default=default or sort_choices[0],\n help='Sort by attribute')\n\n return pagination\n\n\napi_app = Flask(__name__, template_folder=os.path.join(__path__[0], 'templates'))\napi_app.config['REMEMBER_COOKIE_NAME'] = 'flexget.token'\napi_app.config['DEBUG'] = True\napi_app.config['ERROR_404_HELP'] = False\n\nCORS(api_app)\nCompress(api_app)\n\napi = API(\n api_app,\n title='Flexget API v{}'.format(__version__),\n version=__version__,\n description='View and manage flexget core operations and plugins. 
Open each endpoint view for usage information.'\n ' Navigate to http://flexget.com/API for more details.'\n)\n\nbase_message = {\n 'type': 'object',\n 'properties': {\n 'status_code': {'type': 'integer'},\n 'message': {'type': 'string'},\n 'status': {'type': 'string'}\n },\n 'required': ['status_code', 'message', 'status']\n\n}\n\nbase_message_schema = api.schema('base_message', base_message)\n\n\nclass APIError(Exception):\n description = 'Server error'\n status_code = 500\n status = 'Error'\n response_model = base_message_schema\n\n def __init__(self, message=None, payload=None):\n self.message = message\n self.payload = payload\n\n def to_dict(self):\n rv = self.payload or {}\n rv.update(status_code=self.status_code, message=self.message, status=self.status)\n return rv\n\n @classmethod\n def schema(cls):\n return cls.response_model.__schema__\n\n\nclass NotFoundError(APIError):\n status_code = 404\n description = 'Not found'\n\n\nclass Unauthorized(APIError):\n status_code = 401\n description = 'Unauthorized'\n\n\nclass BadRequest(APIError):\n status_code = 400\n description = 'Bad request'\n\n\nclass Conflict(APIError):\n status_code = 409\n description = 'Conflict'\n\n\nclass PreconditionFailed(APIError):\n status_code = 412\n description = 'Precondition failed'\n\n\nclass NotModified(APIError):\n status_code = 304\n description = 'not modified'\n\n\nclass ValidationError(APIError):\n status_code = 422\n description = 'Validation error'\n\n response_model = api.inherit('validation_error', APIError.response_model, {\n 'type': 'object',\n 'properties': {\n 'validation_errors': {\n 'type': 'array',\n 'items': {\n 'type': 'object',\n 'properties': {\n 'message': {'type': 'string', 'description': 'A human readable message explaining the error.'},\n 'validator': {'type': 'string', 'description': 'The name of the failed validator.'},\n 'validator_value': {\n 'type': 'string', 'description': 'The value for the failed validator in the schema.'\n },\n 'path': {'type': 'string'},\n 'schema_path': {'type': 'string'},\n }\n }\n }\n },\n 'required': ['validation_errors']\n })\n\n verror_attrs = (\n 'message', 'cause', 'validator', 'validator_value',\n 'path', 'schema_path', 'parent'\n )\n\n def __init__(self, validation_errors, message='validation error'):\n payload = {'validation_errors': [self._verror_to_dict(error) for error in validation_errors]}\n super(ValidationError, self).__init__(message, payload=payload)\n\n def _verror_to_dict(self, error):\n error_dict = {}\n for attr in self.verror_attrs:\n if isinstance(getattr(error, attr), deque):\n error_dict[attr] = list(getattr(error, attr))\n else:\n error_dict[attr] = str(getattr(error, attr))\n return error_dict\n\n\nempty_response = api.schema('empty', {'type': 'object'})\n\n\ndef success_response(message, status_code=200, status='success'):\n rsp_dict = {\n 'message': message,\n 'status_code': status_code,\n 'status': status\n }\n rsp = jsonify(rsp_dict)\n rsp.status_code = status_code\n return rsp\n\n\[email protected](APIError)\[email protected](NotFoundError)\[email protected](ValidationError)\[email protected](BadRequest)\[email protected](Unauthorized)\[email protected](Conflict)\[email protected](NotModified)\[email protected](PreconditionFailed)\ndef api_errors(error):\n return error.to_dict(), error.status_code\n\n\n@with_session\ndef api_key(session=None):\n log.debug('fetching token for internal lookup')\n return session.query(User).first().token\n\n\ndef etag(f):\n \"\"\"\n A decorator that add an ETag header to the response and 
checks for the \"If-Match\" and \"If-Not-Match\" headers to\n return an appropriate response.\n\n :param f: A GET or HEAD flask method to wrap\n :return: The method's response with the ETag and Cache-Control headers, raises a 412 error or returns a 304 response\n \"\"\"\n\n @wraps(f)\n def wrapped(*args, **kwargs):\n # Identify if this is a GET or HEAD in order to proceed\n assert request.method in ['HEAD', 'GET'], '@etag is only supported for GET requests'\n rv = f(*args, **kwargs)\n rv = make_response(rv)\n\n # Some headers can change without data change for specific page\n content_headers = rv.headers.get('link', '') + rv.headers.get('count', '') + rv.headers.get('total-count', '')\n data = (rv.get_data().decode() + content_headers).encode()\n etag = generate_etag(data)\n rv.headers['Cache-Control'] = 'max-age=86400'\n rv.headers['ETag'] = etag\n if_match = request.headers.get('If-Match')\n if_none_match = request.headers.get('If-None-Match')\n\n if if_match:\n etag_list = [tag.strip() for tag in if_match.split(',')]\n if etag not in etag_list and '*' not in etag_list:\n raise PreconditionFailed('etag does not match')\n elif if_none_match:\n etag_list = [tag.strip() for tag in if_none_match.split(',')]\n if etag in etag_list or '*' in etag_list:\n raise NotModified\n\n return rv\n\n return wrapped\n\n\ndef pagination_headers(total_pages, total_items, page_count, request):\n \"\"\"\n Creates the `Link`. 'Count' and 'Total-Count' headers, to be used for pagination traversing\n\n :param total_pages: Total number of pages\n :param total_items: Total number of items in all the pages\n :param page_count: Item count for page (may differ from page size request)\n :param request: The flask request used, required to build other reoccurring vars like url and such.\n :return:\n \"\"\"\n\n # Build constant variables from request data\n url = request.url_root + request.path.lstrip('/')\n per_page = request.args.get('per_page', 50)\n page = int(request.args.get('page', 1))\n\n # Build the base template\n LINKTEMPLATE = '<{}?per_page={}&'.format(url, per_page)\n\n # Removed page and per_page from query string\n query_string = re.sub(b'per_page=\\d+', b'', request.query_string)\n query_string = re.sub(b'page=\\d+', b'', query_string)\n query_string = re.sub(b'&{2,}', b'&', query_string)\n\n # Add all original query params\n LINKTEMPLATE += query_string.decode().lstrip('&') + '&page={}>; rel=\"{}\"'\n\n link_string = ''\n\n if page > 1:\n link_string += LINKTEMPLATE.format(page - 1, 'prev') + ', '\n if page < total_pages:\n link_string += LINKTEMPLATE.format(page + 1, 'next') + ', '\n link_string += LINKTEMPLATE.format(total_pages, 'last')\n\n return {\n 'Link': link_string,\n 'Total-Count': total_items,\n 'Count': page_count\n }\n",
"path": "flexget/api/app.py"
}
] | [
{
"content": "from __future__ import unicode_literals, division, absolute_import\nfrom builtins import * # noqa pylint: disable=unused-import, redefined-builtin\n\nimport json\nimport logging\nimport os\nimport re\nfrom collections import deque\nfrom functools import wraps\n\nfrom flask import Flask, request, jsonify, make_response\nfrom flask_compress import Compress\nfrom flask_cors import CORS\nfrom flask_restplus import Model, Api as RestPlusAPI\nfrom flask_restplus import Resource\nfrom flexget import manager\nfrom flexget.config_schema import process_config\nfrom flexget.utils.database import with_session\nfrom flexget.webserver import User\nfrom jsonschema import RefResolutionError\nfrom werkzeug.http import generate_etag\n\nfrom . import __path__\n\n__version__ = '1.1.2'\n\nlog = logging.getLogger('api')\n\n\nclass APIClient(object):\n \"\"\"\n This is an client which can be used as a more pythonic interface to the rest api.\n\n It skips http, and is only usable from within the running flexget process.\n \"\"\"\n\n def __init__(self):\n self.app = api_app.test_client()\n\n def __getattr__(self, item):\n return APIEndpoint('/api/' + item, self.get_endpoint)\n\n def get_endpoint(self, url, data=None, method=None):\n if method is None:\n method = 'POST' if data is not None else 'GET'\n auth_header = dict(Authorization='Token %s' % api_key())\n response = self.app.open(url, data=data, follow_redirects=True, method=method, headers=auth_header)\n result = json.loads(response.get_data(as_text=True))\n # TODO: Proper exceptions\n if 200 > response.status_code >= 300:\n raise Exception(result['error'])\n return result\n\n\nclass APIEndpoint(object):\n def __init__(self, endpoint, caller):\n self.endpoint = endpoint\n self.caller = caller\n\n def __getattr__(self, item):\n return self.__class__(self.endpoint + '/' + item, self.caller)\n\n __getitem__ = __getattr__\n\n def __call__(self, data=None, method=None):\n return self.caller(self.endpoint, data=data, method=method)\n\n\ndef api_version(f):\n \"\"\" Add the 'API-Version' header to all responses \"\"\"\n\n @wraps(f)\n def wrapped(*args, **kwargs):\n rv = f(*args, **kwargs)\n rv.headers['API-Version'] = __version__\n return rv\n\n return wrapped\n\n\nclass APIResource(Resource):\n \"\"\"All api resources should subclass this class.\"\"\"\n method_decorators = [with_session, api_version]\n\n def __init__(self, api, *args, **kwargs):\n self.manager = manager.manager\n super(APIResource, self).__init__(api, *args, **kwargs)\n\n\nclass APISchemaModel(Model):\n \"\"\"A flask restplus :class:`flask_restplus.models.ApiModel` which can take a json schema directly.\"\"\"\n\n def __init__(self, name, schema, *args, **kwargs):\n super(APISchemaModel, self).__init__(name, *args, **kwargs)\n self._schema = schema\n\n @property\n def __schema__(self):\n if self.__parent__:\n return {\n 'allOf': [\n {'$ref': '#/definitions/{0}'.format(self.__parent__.name)},\n self._schema\n ]\n }\n else:\n return self._schema\n\n def __nonzero__(self):\n return bool(self._schema)\n\n def __bool__(self):\n return self._schema is not None\n\n def __repr__(self):\n return '<ApiSchemaModel(%r)>' % self._schema\n\n\nclass API(RestPlusAPI):\n \"\"\"\n Extends a flask restplus :class:`flask_restplus.Api` with:\n - methods to make using json schemas easier\n - methods to auto document and handle :class:`ApiError` responses\n \"\"\"\n\n def _rewrite_refs(self, schema):\n if isinstance(schema, list):\n for value in schema:\n self._rewrite_refs(value)\n\n if isinstance(schema, 
dict):\n for key, value in schema.items():\n if isinstance(value, (list, dict)):\n self._rewrite_refs(value)\n\n if key == '$ref' and value.startswith('/'):\n schema[key] = '#definitions%s' % value\n\n def schema(self, name, schema, **kwargs):\n \"\"\"\n Register a json schema.\n\n Usable like :meth:`flask_restplus.Api.model`, except takes a json schema as its argument.\n\n :returns: An :class:`ApiSchemaModel` instance registered to this api.\n \"\"\"\n model = APISchemaModel(name, schema, **kwargs)\n model.__apidoc__.update(kwargs)\n self.models[name] = model\n return model\n\n def inherit(self, name, parent, fields):\n \"\"\"\n Extends :meth:`flask_restplus.Api.inherit` to allow `fields` to be a json schema, if `parent` is a\n :class:`ApiSchemaModel`.\n \"\"\"\n if isinstance(parent, APISchemaModel):\n model = APISchemaModel(name, fields)\n model.__apidoc__['name'] = name\n model.__parent__ = parent\n self.models[name] = model\n return model\n return super(API, self).inherit(name, parent, fields)\n\n def validate(self, model, schema_override=None, description=None):\n \"\"\"\n When a method is decorated with this, json data submitted to the endpoint will be validated with the given\n `model`. This also auto-documents the expected model, as well as the possible :class:`ValidationError` response.\n \"\"\"\n\n def decorator(func):\n @api.expect((model, description))\n @api.response(ValidationError)\n @wraps(func)\n def wrapper(*args, **kwargs):\n payload = request.json\n try:\n schema = schema_override if schema_override else model.__schema__\n errors = process_config(config=payload, schema=schema, set_defaults=False)\n\n if errors:\n raise ValidationError(errors)\n except RefResolutionError as e:\n raise APIError(str(e))\n return func(*args, **kwargs)\n\n return wrapper\n\n return decorator\n\n def response(self, code_or_apierror, description='Success', model=None, **kwargs):\n \"\"\"\n Extends :meth:`flask_restplus.Api.response` to allow passing an :class:`ApiError` class instead of\n response code. 
If an `ApiError` is used, the response code, and expected response model, is automatically\n documented.\n \"\"\"\n try:\n if issubclass(code_or_apierror, APIError):\n description = code_or_apierror.description or description\n return self.doc(\n responses={code_or_apierror.status_code: (description, code_or_apierror.response_model)}, **kwargs)\n except TypeError:\n # If first argument isn't a class this happens\n pass\n return super(API, self).response(code_or_apierror, description, model=model, **kwargs)\n\n def pagination_parser(self, parser=None, sort_choices=None, default=None, add_sort=None):\n \"\"\"\n Return a standardized pagination parser, to be used for any endpoint that has pagination.\n\n :param RequestParser parser: Can extend a given parser or create a new one\n :param tuple sort_choices: A tuple of strings, to be used as server side attribute searches\n :param str default: The default sort string, used `sort_choices[0]` if not given\n :param bool add_sort: Add sort order choices without adding specific sort choices\n\n :return: An api.parser() instance with pagination and sorting arguments.\n \"\"\"\n pagination = parser.copy() if parser else self.parser()\n pagination.add_argument('page', type=int, default=1, help='Page number')\n pagination.add_argument('per_page', type=int, default=50, help='Results per page')\n if sort_choices or add_sort:\n pagination.add_argument('order', choices=('desc', 'asc'), default='desc', help='Sorting order')\n if sort_choices:\n pagination.add_argument('sort_by', choices=sort_choices, default=default or sort_choices[0],\n help='Sort by attribute')\n\n return pagination\n\n\napi_app = Flask(__name__, template_folder=os.path.join(__path__[0], 'templates'))\napi_app.config['REMEMBER_COOKIE_NAME'] = 'flexget.token'\napi_app.config['DEBUG'] = True\napi_app.config['ERROR_404_HELP'] = False\napi_app.url_map.strict_slashes = False\n\nCORS(api_app)\nCompress(api_app)\n\napi = API(\n api_app,\n title='Flexget API v{}'.format(__version__),\n version=__version__,\n description='View and manage flexget core operations and plugins. 
Open each endpoint view for usage information.'\n ' Navigate to http://flexget.com/API for more details.'\n)\n\nbase_message = {\n 'type': 'object',\n 'properties': {\n 'status_code': {'type': 'integer'},\n 'message': {'type': 'string'},\n 'status': {'type': 'string'}\n },\n 'required': ['status_code', 'message', 'status']\n\n}\n\nbase_message_schema = api.schema('base_message', base_message)\n\n\nclass APIError(Exception):\n description = 'Server error'\n status_code = 500\n status = 'Error'\n response_model = base_message_schema\n\n def __init__(self, message=None, payload=None):\n self.message = message\n self.payload = payload\n\n def to_dict(self):\n rv = self.payload or {}\n rv.update(status_code=self.status_code, message=self.message, status=self.status)\n return rv\n\n @classmethod\n def schema(cls):\n return cls.response_model.__schema__\n\n\nclass NotFoundError(APIError):\n status_code = 404\n description = 'Not found'\n\n\nclass Unauthorized(APIError):\n status_code = 401\n description = 'Unauthorized'\n\n\nclass BadRequest(APIError):\n status_code = 400\n description = 'Bad request'\n\n\nclass Conflict(APIError):\n status_code = 409\n description = 'Conflict'\n\n\nclass PreconditionFailed(APIError):\n status_code = 412\n description = 'Precondition failed'\n\n\nclass NotModified(APIError):\n status_code = 304\n description = 'not modified'\n\n\nclass ValidationError(APIError):\n status_code = 422\n description = 'Validation error'\n\n response_model = api.inherit('validation_error', APIError.response_model, {\n 'type': 'object',\n 'properties': {\n 'validation_errors': {\n 'type': 'array',\n 'items': {\n 'type': 'object',\n 'properties': {\n 'message': {'type': 'string', 'description': 'A human readable message explaining the error.'},\n 'validator': {'type': 'string', 'description': 'The name of the failed validator.'},\n 'validator_value': {\n 'type': 'string', 'description': 'The value for the failed validator in the schema.'\n },\n 'path': {'type': 'string'},\n 'schema_path': {'type': 'string'},\n }\n }\n }\n },\n 'required': ['validation_errors']\n })\n\n verror_attrs = (\n 'message', 'cause', 'validator', 'validator_value',\n 'path', 'schema_path', 'parent'\n )\n\n def __init__(self, validation_errors, message='validation error'):\n payload = {'validation_errors': [self._verror_to_dict(error) for error in validation_errors]}\n super(ValidationError, self).__init__(message, payload=payload)\n\n def _verror_to_dict(self, error):\n error_dict = {}\n for attr in self.verror_attrs:\n if isinstance(getattr(error, attr), deque):\n error_dict[attr] = list(getattr(error, attr))\n else:\n error_dict[attr] = str(getattr(error, attr))\n return error_dict\n\n\nempty_response = api.schema('empty', {'type': 'object'})\n\n\ndef success_response(message, status_code=200, status='success'):\n rsp_dict = {\n 'message': message,\n 'status_code': status_code,\n 'status': status\n }\n rsp = jsonify(rsp_dict)\n rsp.status_code = status_code\n return rsp\n\n\[email protected](APIError)\[email protected](NotFoundError)\[email protected](ValidationError)\[email protected](BadRequest)\[email protected](Unauthorized)\[email protected](Conflict)\[email protected](NotModified)\[email protected](PreconditionFailed)\ndef api_errors(error):\n return error.to_dict(), error.status_code\n\n\n@with_session\ndef api_key(session=None):\n log.debug('fetching token for internal lookup')\n return session.query(User).first().token\n\n\ndef etag(f):\n \"\"\"\n A decorator that add an ETag header to the response and 
checks for the \"If-Match\" and \"If-Not-Match\" headers to\n return an appropriate response.\n\n :param f: A GET or HEAD flask method to wrap\n :return: The method's response with the ETag and Cache-Control headers, raises a 412 error or returns a 304 response\n \"\"\"\n\n @wraps(f)\n def wrapped(*args, **kwargs):\n # Identify if this is a GET or HEAD in order to proceed\n assert request.method in ['HEAD', 'GET'], '@etag is only supported for GET requests'\n rv = f(*args, **kwargs)\n rv = make_response(rv)\n\n # Some headers can change without data change for specific page\n content_headers = rv.headers.get('link', '') + rv.headers.get('count', '') + rv.headers.get('total-count', '')\n data = (rv.get_data().decode() + content_headers).encode()\n etag = generate_etag(data)\n rv.headers['Cache-Control'] = 'max-age=86400'\n rv.headers['ETag'] = etag\n if_match = request.headers.get('If-Match')\n if_none_match = request.headers.get('If-None-Match')\n\n if if_match:\n etag_list = [tag.strip() for tag in if_match.split(',')]\n if etag not in etag_list and '*' not in etag_list:\n raise PreconditionFailed('etag does not match')\n elif if_none_match:\n etag_list = [tag.strip() for tag in if_none_match.split(',')]\n if etag in etag_list or '*' in etag_list:\n raise NotModified\n\n return rv\n\n return wrapped\n\n\ndef pagination_headers(total_pages, total_items, page_count, request):\n \"\"\"\n Creates the `Link`. 'Count' and 'Total-Count' headers, to be used for pagination traversing\n\n :param total_pages: Total number of pages\n :param total_items: Total number of items in all the pages\n :param page_count: Item count for page (may differ from page size request)\n :param request: The flask request used, required to build other reoccurring vars like url and such.\n :return:\n \"\"\"\n\n # Build constant variables from request data\n url = request.url_root + request.path.lstrip('/')\n per_page = request.args.get('per_page', 50)\n page = int(request.args.get('page', 1))\n\n # Build the base template\n LINKTEMPLATE = '<{}?per_page={}&'.format(url, per_page)\n\n # Removed page and per_page from query string\n query_string = re.sub(b'per_page=\\d+', b'', request.query_string)\n query_string = re.sub(b'page=\\d+', b'', query_string)\n query_string = re.sub(b'&{2,}', b'&', query_string)\n\n # Add all original query params\n LINKTEMPLATE += query_string.decode().lstrip('&') + '&page={}>; rel=\"{}\"'\n\n link_string = ''\n\n if page > 1:\n link_string += LINKTEMPLATE.format(page - 1, 'prev') + ', '\n if page < total_pages:\n link_string += LINKTEMPLATE.format(page + 1, 'next') + ', '\n link_string += LINKTEMPLATE.format(total_pages, 'last')\n\n return {\n 'Link': link_string,\n 'Total-Count': total_items,\n 'Count': page_count\n }\n",
"path": "flexget/api/app.py"
}
] | diff --git a/flexget/api/app.py b/flexget/api/app.py
index 613965ca86..218ab3bf78 100644
--- a/flexget/api/app.py
+++ b/flexget/api/app.py
@@ -231,6 +231,7 @@ def pagination_parser(self, parser=None, sort_choices=None, default=None, add_so
api_app.config['REMEMBER_COOKIE_NAME'] = 'flexget.token'
api_app.config['DEBUG'] = True
api_app.config['ERROR_404_HELP'] = False
+api_app.url_map.strict_slashes = False
CORS(api_app)
Compress(api_app)
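
The one-line fix works because Werkzeug rules inherit `strict_slashes` from the URL map at the moment they are registered, which is why it is set right after the app is created and before any API resources are added. A sketch of the effect on a plain Flask app:

```python
from flask import Flask

app = Flask(__name__)
app.url_map.strict_slashes = False  # must precede route registration, as in the diff

@app.route('/api/auth/login/', methods=['POST'])
def login():
    return 'ok'

client = app.test_client()
assert client.post('/api/auth/login/').status_code == 200
assert client.post('/api/auth/login').status_code == 200  # no redirect anymore
```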
|
keras-team__autokeras-1285 | How to use multiple GPUs?
### Feature Description
I want to use a single machine with multiple GPUs for training, but it seems to have no actual effect.
### Code Example
```python
strategy = tf.distribute.MirroredStrategy()
with strategy.scope():
    ...  # build and compile the model here
```
### Reason
Speed up the training computation.
| [
{
"content": "# Copyright 2020 The AutoKeras Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport kerastuner\nimport tensorflow as tf\nfrom tensorflow.python.util import nest\n\nfrom autokeras import blocks as blocks_module\nfrom autokeras import nodes as nodes_module\nfrom autokeras.engine import head as head_module\nfrom autokeras.engine import serializable\nfrom autokeras.utils import utils\n\n\ndef feature_encoding_input(block):\n \"\"\"Fetch the column_types and column_names.\n\n The values are fetched for FeatureEncoding from StructuredDataInput.\n \"\"\"\n if not isinstance(block.inputs[0], nodes_module.StructuredDataInput):\n raise TypeError(\n \"CategoricalToNumerical can only be used with StructuredDataInput.\"\n )\n block.column_types = block.inputs[0].column_types\n block.column_names = block.inputs[0].column_names\n\n\n# Compile the graph.\nCOMPILE_FUNCTIONS = {\n blocks_module.StructuredDataBlock: [feature_encoding_input],\n blocks_module.CategoricalToNumerical: [feature_encoding_input],\n}\n\n\ndef load_graph(filepath, custom_objects=None):\n if custom_objects is None:\n custom_objects = {}\n with tf.keras.utils.custom_object_scope(custom_objects):\n return Graph.from_config(utils.load_json(filepath))\n\n\nclass Graph(kerastuner.HyperModel, serializable.Serializable):\n \"\"\"A graph consists of connected Blocks, or Heads.\n\n # Arguments\n inputs: A list of input node(s) for the Graph.\n outputs: A list of output node(s) for the Graph.\n override_hps: A list of HyperParameters. 
The predefined HyperParameters that\n will override the space of the Hyperparameters defined in the Hypermodels\n with the same names.\n \"\"\"\n\n def __init__(self, inputs=None, outputs=None, override_hps=None):\n super().__init__()\n self.inputs = nest.flatten(inputs)\n self.outputs = nest.flatten(outputs)\n self._node_to_id = {}\n self._nodes = []\n self.blocks = []\n self._block_to_id = {}\n if inputs and outputs:\n self._build_network()\n self.override_hps = override_hps or []\n\n def compile(self):\n \"\"\"Share the information between blocks.\"\"\"\n for block in self.blocks:\n for func in COMPILE_FUNCTIONS.get(block.__class__, []):\n func(block)\n\n def _register_hps(self, hp):\n \"\"\"Register the override HyperParameters for current HyperParameters.\"\"\"\n for single_hp in self.override_hps:\n name = single_hp.name\n if name not in hp.values:\n hp._register(single_hp)\n hp.values[name] = single_hp.default\n\n def _build_network(self):\n self._node_to_id = {}\n\n # Recursively find all the interested nodes.\n for input_node in self.inputs:\n self._search_network(input_node, self.outputs, set(), set())\n self._nodes = sorted(\n list(self._node_to_id.keys()), key=lambda x: self._node_to_id[x]\n )\n\n for node in self.inputs + self.outputs:\n if node not in self._node_to_id:\n raise ValueError(\"Inputs and outputs not connected.\")\n\n # Find the blocks.\n blocks = []\n for input_node in self._nodes:\n for block in input_node.out_blocks:\n if (\n any(\n [\n output_node in self._node_to_id\n for output_node in block.outputs\n ]\n )\n and block not in blocks\n ):\n blocks.append(block)\n\n # Check if all the inputs of the blocks are set as inputs.\n for block in blocks:\n for input_node in block.inputs:\n if input_node not in self._node_to_id:\n raise ValueError(\n \"A required input is missing for HyperModel \"\n \"{name}.\".format(name=block.name)\n )\n\n # Calculate the in degree of all the nodes\n in_degree = [0] * len(self._nodes)\n for node_id, node in enumerate(self._nodes):\n in_degree[node_id] = len(\n [block for block in node.in_blocks if block in blocks]\n )\n\n # Add the blocks in topological order.\n self.blocks = []\n self._block_to_id = {}\n while len(blocks) != 0:\n new_added = []\n\n # Collect blocks with in degree 0.\n for block in blocks:\n if any([in_degree[self._node_to_id[node]] for node in block.inputs]):\n continue\n new_added.append(block)\n\n # Remove the collected blocks from blocks.\n for block in new_added:\n blocks.remove(block)\n\n for block in new_added:\n # Add the collected blocks to the Graph.\n self._add_block(block)\n\n # Decrease the in degree of the output nodes.\n for output_node in block.outputs:\n output_node_id = self._node_to_id[output_node]\n in_degree[output_node_id] -= 1\n\n def _search_network(self, input_node, outputs, in_stack_nodes, visited_nodes):\n visited_nodes.add(input_node)\n in_stack_nodes.add(input_node)\n\n outputs_reached = False\n if input_node in outputs:\n outputs_reached = True\n\n for block in input_node.out_blocks:\n for output_node in block.outputs:\n if output_node in in_stack_nodes:\n raise ValueError(\"The network has a cycle.\")\n if output_node not in visited_nodes:\n self._search_network(\n output_node, outputs, in_stack_nodes, visited_nodes\n )\n if output_node in self._node_to_id.keys():\n outputs_reached = True\n\n if outputs_reached:\n self._add_node(input_node)\n\n in_stack_nodes.remove(input_node)\n\n def _add_block(self, block):\n if block not in self.blocks:\n block_id = len(self.blocks)\n 
self._block_to_id[block] = block_id\n self.blocks.append(block)\n\n def _add_node(self, input_node):\n if input_node not in self._node_to_id:\n self._node_to_id[input_node] = len(self._node_to_id)\n\n def get_config(self):\n blocks = [blocks_module.serialize(block) for block in self.blocks]\n nodes = {\n str(self._node_to_id[node]): nodes_module.serialize(node)\n for node in self.inputs\n }\n override_hps = [\n kerastuner.engine.hyperparameters.serialize(hp)\n for hp in self.override_hps\n ]\n block_inputs = {\n str(block_id): [self._node_to_id[node] for node in block.inputs]\n for block_id, block in enumerate(self.blocks)\n }\n block_outputs = {\n str(block_id): [self._node_to_id[node] for node in block.outputs]\n for block_id, block in enumerate(self.blocks)\n }\n\n outputs = [self._node_to_id[node] for node in self.outputs]\n\n return {\n \"override_hps\": override_hps, # List [serialized].\n \"blocks\": blocks, # Dict {id: serialized}.\n \"nodes\": nodes, # Dict {id: serialized}.\n \"outputs\": outputs, # List of node_ids.\n \"block_inputs\": block_inputs, # Dict {id: List of node_ids}.\n \"block_outputs\": block_outputs, # Dict {id: List of node_ids}.\n }\n\n @classmethod\n def from_config(cls, config):\n blocks = [blocks_module.deserialize(block) for block in config[\"blocks\"]]\n nodes = {\n int(node_id): nodes_module.deserialize(node)\n for node_id, node in config[\"nodes\"].items()\n }\n override_hps = [\n kerastuner.engine.hyperparameters.deserialize(config)\n for config in config[\"override_hps\"]\n ]\n\n inputs = [nodes[node_id] for node_id in nodes]\n for block_id, block in enumerate(blocks):\n input_nodes = [\n nodes[node_id] for node_id in config[\"block_inputs\"][str(block_id)]\n ]\n output_nodes = nest.flatten(block(input_nodes))\n for output_node, node_id in zip(\n output_nodes, config[\"block_outputs\"][str(block_id)]\n ):\n nodes[node_id] = output_node\n\n outputs = [nodes[node_id] for node_id in config[\"outputs\"]]\n return cls(inputs=inputs, outputs=outputs, override_hps=override_hps)\n\n def build(self, hp):\n \"\"\"Build the HyperModel into a Keras Model.\"\"\"\n tf.keras.backend.clear_session()\n self._register_hps(hp)\n self.compile()\n real_nodes = {}\n for input_node in self.inputs:\n node_id = self._node_to_id[input_node]\n real_nodes[node_id] = input_node.build()\n for block in self.blocks:\n temp_inputs = [\n real_nodes[self._node_to_id[input_node]]\n for input_node in block.inputs\n ]\n outputs = block.build(hp, inputs=temp_inputs)\n outputs = nest.flatten(outputs)\n for output_node, real_output_node in zip(block.outputs, outputs):\n real_nodes[self._node_to_id[output_node]] = real_output_node\n model = tf.keras.Model(\n [real_nodes[self._node_to_id[input_node]] for input_node in self.inputs],\n [\n real_nodes[self._node_to_id[output_node]]\n for output_node in self.outputs\n ],\n )\n\n return self._compile_keras_model(hp, model)\n\n def _get_metrics(self):\n metrics = {}\n for output_node in self.outputs:\n block = output_node.in_blocks[0]\n if isinstance(block, head_module.Head):\n metrics[block.name] = block.metrics\n return metrics\n\n def _get_loss(self):\n loss = {}\n for output_node in self.outputs:\n block = output_node.in_blocks[0]\n if isinstance(block, head_module.Head):\n loss[block.name] = block.loss\n return loss\n\n def _compile_keras_model(self, hp, model):\n # Specify hyperparameters from compile(...)\n optimizer_name = hp.Choice(\n \"optimizer\", [\"adam\", \"adadelta\", \"sgd\"], default=\"adam\"\n )\n learning_rate = hp.Choice(\n 
\"learning_rate\", [1e-1, 1e-2, 1e-3, 1e-4, 1e-5], default=1e-3\n )\n\n if optimizer_name == \"adam\":\n optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate)\n elif optimizer_name == \"adadelta\":\n optimizer = tf.keras.optimizers.Adadelta(learning_rate=learning_rate)\n elif optimizer_name == \"sgd\":\n optimizer = tf.keras.optimizers.SGD(learning_rate=learning_rate)\n\n model.compile(\n optimizer=optimizer, metrics=self._get_metrics(), loss=self._get_loss()\n )\n\n return model\n\n def save(self, filepath):\n utils.save_json(filepath, self.get_config())\n",
"path": "autokeras/graph.py"
}
] | [
{
"content": "# Copyright 2020 The AutoKeras Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport kerastuner\nimport tensorflow as tf\nfrom tensorflow.python.util import nest\n\nfrom autokeras import blocks as blocks_module\nfrom autokeras import nodes as nodes_module\nfrom autokeras.engine import head as head_module\nfrom autokeras.engine import serializable\nfrom autokeras.utils import utils\n\n\ndef feature_encoding_input(block):\n \"\"\"Fetch the column_types and column_names.\n\n The values are fetched for FeatureEncoding from StructuredDataInput.\n \"\"\"\n if not isinstance(block.inputs[0], nodes_module.StructuredDataInput):\n raise TypeError(\n \"CategoricalToNumerical can only be used with StructuredDataInput.\"\n )\n block.column_types = block.inputs[0].column_types\n block.column_names = block.inputs[0].column_names\n\n\n# Compile the graph.\nCOMPILE_FUNCTIONS = {\n blocks_module.StructuredDataBlock: [feature_encoding_input],\n blocks_module.CategoricalToNumerical: [feature_encoding_input],\n}\n\n\ndef load_graph(filepath, custom_objects=None):\n if custom_objects is None:\n custom_objects = {}\n with tf.keras.utils.custom_object_scope(custom_objects):\n return Graph.from_config(utils.load_json(filepath))\n\n\nclass Graph(kerastuner.HyperModel, serializable.Serializable):\n \"\"\"A graph consists of connected Blocks, or Heads.\n\n # Arguments\n inputs: A list of input node(s) for the Graph.\n outputs: A list of output node(s) for the Graph.\n override_hps: A list of HyperParameters. 
The predefined HyperParameters that\n will override the space of the Hyperparameters defined in the Hypermodels\n with the same names.\n \"\"\"\n\n def __init__(self, inputs=None, outputs=None, override_hps=None):\n super().__init__()\n self.inputs = nest.flatten(inputs)\n self.outputs = nest.flatten(outputs)\n self._node_to_id = {}\n self._nodes = []\n self.blocks = []\n self._block_to_id = {}\n if inputs and outputs:\n self._build_network()\n self.override_hps = override_hps or []\n\n def compile(self):\n \"\"\"Share the information between blocks.\"\"\"\n for block in self.blocks:\n for func in COMPILE_FUNCTIONS.get(block.__class__, []):\n func(block)\n\n def _register_hps(self, hp):\n \"\"\"Register the override HyperParameters for current HyperParameters.\"\"\"\n for single_hp in self.override_hps:\n name = single_hp.name\n if name not in hp.values:\n hp._register(single_hp)\n hp.values[name] = single_hp.default\n\n def _build_network(self):\n self._node_to_id = {}\n\n # Recursively find all the interested nodes.\n for input_node in self.inputs:\n self._search_network(input_node, self.outputs, set(), set())\n self._nodes = sorted(\n list(self._node_to_id.keys()), key=lambda x: self._node_to_id[x]\n )\n\n for node in self.inputs + self.outputs:\n if node not in self._node_to_id:\n raise ValueError(\"Inputs and outputs not connected.\")\n\n # Find the blocks.\n blocks = []\n for input_node in self._nodes:\n for block in input_node.out_blocks:\n if (\n any(\n [\n output_node in self._node_to_id\n for output_node in block.outputs\n ]\n )\n and block not in blocks\n ):\n blocks.append(block)\n\n # Check if all the inputs of the blocks are set as inputs.\n for block in blocks:\n for input_node in block.inputs:\n if input_node not in self._node_to_id:\n raise ValueError(\n \"A required input is missing for HyperModel \"\n \"{name}.\".format(name=block.name)\n )\n\n # Calculate the in degree of all the nodes\n in_degree = [0] * len(self._nodes)\n for node_id, node in enumerate(self._nodes):\n in_degree[node_id] = len(\n [block for block in node.in_blocks if block in blocks]\n )\n\n # Add the blocks in topological order.\n self.blocks = []\n self._block_to_id = {}\n while len(blocks) != 0:\n new_added = []\n\n # Collect blocks with in degree 0.\n for block in blocks:\n if any([in_degree[self._node_to_id[node]] for node in block.inputs]):\n continue\n new_added.append(block)\n\n # Remove the collected blocks from blocks.\n for block in new_added:\n blocks.remove(block)\n\n for block in new_added:\n # Add the collected blocks to the Graph.\n self._add_block(block)\n\n # Decrease the in degree of the output nodes.\n for output_node in block.outputs:\n output_node_id = self._node_to_id[output_node]\n in_degree[output_node_id] -= 1\n\n def _search_network(self, input_node, outputs, in_stack_nodes, visited_nodes):\n visited_nodes.add(input_node)\n in_stack_nodes.add(input_node)\n\n outputs_reached = False\n if input_node in outputs:\n outputs_reached = True\n\n for block in input_node.out_blocks:\n for output_node in block.outputs:\n if output_node in in_stack_nodes:\n raise ValueError(\"The network has a cycle.\")\n if output_node not in visited_nodes:\n self._search_network(\n output_node, outputs, in_stack_nodes, visited_nodes\n )\n if output_node in self._node_to_id.keys():\n outputs_reached = True\n\n if outputs_reached:\n self._add_node(input_node)\n\n in_stack_nodes.remove(input_node)\n\n def _add_block(self, block):\n if block not in self.blocks:\n block_id = len(self.blocks)\n 
self._block_to_id[block] = block_id\n self.blocks.append(block)\n\n def _add_node(self, input_node):\n if input_node not in self._node_to_id:\n self._node_to_id[input_node] = len(self._node_to_id)\n\n def get_config(self):\n blocks = [blocks_module.serialize(block) for block in self.blocks]\n nodes = {\n str(self._node_to_id[node]): nodes_module.serialize(node)\n for node in self.inputs\n }\n override_hps = [\n kerastuner.engine.hyperparameters.serialize(hp)\n for hp in self.override_hps\n ]\n block_inputs = {\n str(block_id): [self._node_to_id[node] for node in block.inputs]\n for block_id, block in enumerate(self.blocks)\n }\n block_outputs = {\n str(block_id): [self._node_to_id[node] for node in block.outputs]\n for block_id, block in enumerate(self.blocks)\n }\n\n outputs = [self._node_to_id[node] for node in self.outputs]\n\n return {\n \"override_hps\": override_hps, # List [serialized].\n \"blocks\": blocks, # Dict {id: serialized}.\n \"nodes\": nodes, # Dict {id: serialized}.\n \"outputs\": outputs, # List of node_ids.\n \"block_inputs\": block_inputs, # Dict {id: List of node_ids}.\n \"block_outputs\": block_outputs, # Dict {id: List of node_ids}.\n }\n\n @classmethod\n def from_config(cls, config):\n blocks = [blocks_module.deserialize(block) for block in config[\"blocks\"]]\n nodes = {\n int(node_id): nodes_module.deserialize(node)\n for node_id, node in config[\"nodes\"].items()\n }\n override_hps = [\n kerastuner.engine.hyperparameters.deserialize(config)\n for config in config[\"override_hps\"]\n ]\n\n inputs = [nodes[node_id] for node_id in nodes]\n for block_id, block in enumerate(blocks):\n input_nodes = [\n nodes[node_id] for node_id in config[\"block_inputs\"][str(block_id)]\n ]\n output_nodes = nest.flatten(block(input_nodes))\n for output_node, node_id in zip(\n output_nodes, config[\"block_outputs\"][str(block_id)]\n ):\n nodes[node_id] = output_node\n\n outputs = [nodes[node_id] for node_id in config[\"outputs\"]]\n return cls(inputs=inputs, outputs=outputs, override_hps=override_hps)\n\n def build(self, hp):\n \"\"\"Build the HyperModel into a Keras Model.\"\"\"\n self._register_hps(hp)\n self.compile()\n real_nodes = {}\n for input_node in self.inputs:\n node_id = self._node_to_id[input_node]\n real_nodes[node_id] = input_node.build()\n for block in self.blocks:\n temp_inputs = [\n real_nodes[self._node_to_id[input_node]]\n for input_node in block.inputs\n ]\n outputs = block.build(hp, inputs=temp_inputs)\n outputs = nest.flatten(outputs)\n for output_node, real_output_node in zip(block.outputs, outputs):\n real_nodes[self._node_to_id[output_node]] = real_output_node\n model = tf.keras.Model(\n [real_nodes[self._node_to_id[input_node]] for input_node in self.inputs],\n [\n real_nodes[self._node_to_id[output_node]]\n for output_node in self.outputs\n ],\n )\n\n return self._compile_keras_model(hp, model)\n\n def _get_metrics(self):\n metrics = {}\n for output_node in self.outputs:\n block = output_node.in_blocks[0]\n if isinstance(block, head_module.Head):\n metrics[block.name] = block.metrics\n return metrics\n\n def _get_loss(self):\n loss = {}\n for output_node in self.outputs:\n block = output_node.in_blocks[0]\n if isinstance(block, head_module.Head):\n loss[block.name] = block.loss\n return loss\n\n def _compile_keras_model(self, hp, model):\n # Specify hyperparameters from compile(...)\n optimizer_name = hp.Choice(\n \"optimizer\", [\"adam\", \"adadelta\", \"sgd\"], default=\"adam\"\n )\n learning_rate = hp.Choice(\n \"learning_rate\", [1e-1, 1e-2, 1e-3, 
1e-4, 1e-5], default=1e-3\n )\n\n if optimizer_name == \"adam\":\n optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate)\n elif optimizer_name == \"adadelta\":\n optimizer = tf.keras.optimizers.Adadelta(learning_rate=learning_rate)\n elif optimizer_name == \"sgd\":\n optimizer = tf.keras.optimizers.SGD(learning_rate=learning_rate)\n\n model.compile(\n optimizer=optimizer, metrics=self._get_metrics(), loss=self._get_loss()\n )\n\n return model\n\n def save(self, filepath):\n utils.save_json(filepath, self.get_config())\n",
"path": "autokeras/graph.py"
}
] | diff --git a/autokeras/graph.py b/autokeras/graph.py
index 9321740b6..555b118fe 100644
--- a/autokeras/graph.py
+++ b/autokeras/graph.py
@@ -249,7 +249,6 @@ def from_config(cls, config):
def build(self, hp):
"""Build the HyperModel into a Keras Model."""
- tf.keras.backend.clear_session()
self._register_hps(hp)
self.compile()
real_nodes = {}
diff --git a/tests/autokeras/engine/tuner_test.py b/tests/autokeras/engine/tuner_test.py
index 3a7c827ad..53ae3d994 100644
--- a/tests/autokeras/engine/tuner_test.py
+++ b/tests/autokeras/engine/tuner_test.py
@@ -120,6 +120,15 @@ def test_tuner_not_call_super_search_with_overwrite(
super_search.assert_not_called()
+def test_tuner_does_not_crash_with_distribution_strategy(tmp_path):
+ tuner = greedy.Greedy(
+ hypermodel=utils.build_graph(),
+ directory=tmp_path,
+ distribution_strategy=tf.distribute.MirroredStrategy(),
+ )
+ tuner.hypermodel.build(tuner.oracle.hyperparameters)
+
+
def test_preprocessing_adapt():
class MockLayer(preprocessing.TextVectorization):
def adapt(self, *args, **kwargs):
diff --git a/tests/autokeras/tuners/task_specific_test.py b/tests/autokeras/tuners/task_specific_test.py
index ee20c72ce..2ffd8b550 100644
--- a/tests/autokeras/tuners/task_specific_test.py
+++ b/tests/autokeras/tuners/task_specific_test.py
@@ -24,6 +24,7 @@
@pytest.fixture
def clear_session():
+ tf.keras.backend.clear_session()
yield
tf.keras.backend.clear_session()
|
rasterio__rasterio-801 | Use .dev versioning, pre-releases
With the 1.0 release coming and likely a few PRs with breaking changes, it would be helpful to start doing dev releases so we can maximize the number of eyes.
**Proposal**: Change the version to `1.0.dev1` now; as we make changes, cut pre-release wheels and distribute them on PyPI.
We'll also likely want to do a legit `alpha`, `beta`, `rc`, `final` cycle for the 1.0 release.
@sgillies @geowurster @brendan-ward sound good?
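For reference, PEP 440 already defines an ordering that supports exactly this cycle. A minimal sanity check (a standalone sketch using the third-party `packaging` library, not rasterio code) confirming the proposed strings sort as intended:
```python
from packaging.version import Version

# dev releases sort before alpha/beta/rc, which sort before the final release
versions = ["1.0.dev1", "1.0a1", "1.0b1", "1.0rc1", "1.0"]
assert sorted(versions, key=Version) == versions
```
Note that pip skips dev/pre-releases unless asked for explicitly, e.g. `pip install --pre rasterio`.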
| [
{
"content": "\"\"\"Rasterio\"\"\"\n\nfrom __future__ import absolute_import\n\nfrom collections import namedtuple\nimport logging\ntry:\n from logging import NullHandler\nexcept ImportError: # pragma: no cover\n class NullHandler(logging.Handler):\n def emit(self, record):\n pass\nimport warnings\n\nfrom rasterio._base import (\n eval_window, window_shape, window_index, gdal_version)\nfrom rasterio.dtypes import (\n bool_, ubyte, uint8, uint16, int16, uint32, int32, float32, float64,\n complex_, check_dtype)\nfrom rasterio.env import ensure_env, Env\nfrom rasterio.compat import string_types\nfrom rasterio.profiles import default_gtiff_profile\nfrom rasterio.transform import Affine, guard_transform\nfrom rasterio.vfs import parse_path\nfrom rasterio import windows\n\n# These modules are imported from the Cython extensions, but are also import\n# here to help tools like cx_Freeze find them automatically\nfrom rasterio import _err, coords, enums, vfs\n\n# Classes in rasterio._io are imported below just before we need them.\n\n__all__ = [\n 'band', 'open', 'copy', 'pad']\n__version__ = \"0.36.0\"\n__gdal_version__ = gdal_version()\n\n# Rasterio attaches NullHandler to the 'rasterio' logger and its\n# descendents. See\n# https://docs.python.org/2/howto/logging.html#configuring-logging-for-a-library\n# Applications must attach their own handlers in order to see messages.\n# See rasterio/rio/main.py for an example.\nlog = logging.getLogger(__name__)\nlog.addHandler(NullHandler())\n\n\n@ensure_env\ndef open(path, mode='r', driver=None, width=None, height=None,\n count=None, crs=None, transform=None, dtype=None, nodata=None,\n **kwargs):\n \"\"\"Open file at ``path`` in ``mode`` 'r' (read), 'r+' (read and\n write), or 'w' (write) and return a dataset Reader or Updater\n object.\n\n In write mode, a driver name such as \"GTiff\" or \"JPEG\" (see GDAL\n docs or ``gdal_translate --help`` on the command line),\n ``width`` (number of pixels per line) and ``height`` (number of\n lines), the ``count`` number of bands in the new file must be\n specified. Additionally, the data type for bands such as\n ``rasterio.ubyte`` for 8-bit bands or ``rasterio.uint16`` for\n 16-bit bands must be specified using the ``dtype`` argument.\n\n Parameters\n ----------\n mode: string\n \"r\" (read), \"r+\" (read/write), or \"w\" (write)\n driver: string\n driver code specifying the format name (e.g. \"GTiff\" or\n \"JPEG\"). See GDAL docs at\n http://www.gdal.org/formats_list.html (optional, required\n for writing).\n width: int\n number of pixels per line\n (optional, required for write)\n height: int\n number of lines\n (optional, required for write)\n count: int > 0\n number of bands\n (optional, required for write)\n dtype: rasterio.dtype\n the data type for bands such as ``rasterio.ubyte`` for\n 8-bit bands or ``rasterio.uint16`` for 16-bit bands\n (optional, required for write)\n crs: dict or string\n Coordinate reference system\n (optional, recommended for write)\n transform: Affine instance\n Affine transformation mapping the pixel space to geographic\n space (optional, recommended for writing).\n nodata: number\n Defines pixel value to be interpreted as null/nodata\n (optional, recommended for write)\n\n Returns\n -------\n A ``DatasetReader`` or ``DatasetUpdater`` object.\n\n Notes\n -----\n In write mode, you must specify at least ``width``, ``height``,\n ``count`` and ``dtype``.\n\n A coordinate reference system for raster datasets in write mode\n can be defined by the ``crs`` argument. 
It takes Proj4 style\n mappings like\n\n .. code::\n\n {'proj': 'longlat', 'ellps': 'WGS84', 'datum': 'WGS84',\n 'no_defs': True}\n\n An affine transformation that maps ``col,row`` pixel coordinates\n to ``x,y`` coordinates in the coordinate reference system can be\n specified using the ``transform`` argument. The value should be\n an instance of ``affine.Affine``\n\n .. code:: python\n\n >>> from affine import Affine\n >>> transform = Affine(0.5, 0.0, -180.0, 0.0, -0.5, 90.0)\n\n These coefficients are shown in the figure below.\n\n .. code::\n\n | x | | a b c | | c |\n | y | = | d e f | | r |\n | 1 | | 0 0 1 | | 1 |\n\n a: rate of change of X with respect to increasing column,\n i.e. pixel width\n b: rotation, 0 if the raster is oriented \"north up\"\n c: X coordinate of the top left corner of the top left pixel\n d: rotation, 0 if the raster is oriented \"north up\"\n e: rate of change of Y with respect to increasing row,\n usually a negative number (i.e. -1 * pixel height) if\n north-up.\n f: Y coordinate of the top left corner of the top left pixel\n\n A 6-element sequence of the affine transformation matrix\n coefficients in ``c, a, b, f, d, e`` order, (i.e. GDAL\n geotransform order) will be accepted until 1.0 (deprecated).\n\n A virtual filesystem can be specified. The ``vfs`` parameter may\n be an Apache Commons VFS style string beginning with \"zip://\" or\n \"tar://\"\". In this case, the ``path`` must be an absolute path\n within that container.\n\n \"\"\"\n if not isinstance(path, string_types):\n raise TypeError(\"invalid path: {0!r}\".format(path))\n if mode and not isinstance(mode, string_types):\n raise TypeError(\"invalid mode: {0!r}\".format(mode))\n if driver and not isinstance(driver, string_types):\n raise TypeError(\"invalid driver: {0!r}\".format(driver))\n if dtype and not check_dtype(dtype):\n raise TypeError(\"invalid dtype: {0!r}\".format(dtype))\n if transform:\n transform = guard_transform(transform)\n elif 'affine' in kwargs:\n affine = kwargs.pop('affine')\n transform = guard_transform(affine)\n\n # Get AWS credentials if we're attempting to access a raster\n # on S3.\n pth, archive, scheme = parse_path(path)\n if scheme == 's3':\n Env().get_aws_credentials()\n log.debug(\"AWS credentials have been obtained\")\n\n # Create dataset instances and pass the given env, which will\n # be taken over by the dataset's context manager if it is not\n # None.\n if mode == 'r':\n from rasterio._io import RasterReader\n s = RasterReader(path)\n elif mode == 'r+':\n from rasterio._io import writer\n s = writer(path, mode)\n elif mode == 'r-':\n from rasterio._base import DatasetReader\n s = DatasetReader(path)\n elif mode == 'w':\n from rasterio._io import writer\n s = writer(path, mode, driver=driver,\n width=width, height=height, count=count,\n crs=crs, transform=transform, dtype=dtype,\n nodata=nodata, **kwargs)\n else:\n raise ValueError(\n \"mode string must be one of 'r', 'r+', or 'w', not %s\" % mode)\n s.start()\n return s\n\n\n@ensure_env\ndef copy(src, dst, **kw):\n \"\"\"Copy a source raster to a new destination with driver specific\n creation options.\n\n Parameters\n ----------\n src: string\n an existing raster file\n dst: string\n valid path to output file.\n\n Returns\n -------\n None\n\n Raises\n ------\n ValueError:\n If source path is not a valid Dataset\n\n Notes\n -----\n A ``driver`` keyword argument with value like 'GTiff' or 'JPEG' is\n used to control the output format.\n\n This is the one way to create write-once files like JPEGs.\n \"\"\"\n from 
rasterio._copy import RasterCopier\n return RasterCopier()(src, dst, **kw)\n\n\ndef drivers(**kwargs):\n \"\"\"Create a gdal environment with registered drivers and creation\n options.\n\n This function is deprecated; please use ``env.Env`` instead.\n\n Parameters\n ----------\n **kwargs:: keyword arguments\n Configuration options that define GDAL driver behavior\n\n See https://trac.osgeo.org/gdal/wiki/ConfigOptions\n\n Returns\n -------\n GDALEnv responsible for managing the environment.\n\n Notes\n -----\n Use as a context manager, ``with rasterio.drivers(): ...``\n \"\"\"\n warnings.warn(\"Deprecated; Use env.Env instead\", DeprecationWarning)\n return Env(**kwargs)\n\n\nBand = namedtuple('Band', ['ds', 'bidx', 'dtype', 'shape'])\n\ndef band(ds, bidx):\n \"\"\"Wraps a dataset and a band index up as a 'Band'\n\n Parameters\n ----------\n ds: rasterio.RasterReader\n Open rasterio dataset\n bidx: int\n Band number, index starting at 1\n\n Returns\n -------\n a rasterio.Band\n \"\"\"\n return Band(\n ds,\n bidx,\n set(ds.dtypes).pop(),\n ds.shape)\n\n\ndef pad(array, transform, pad_width, mode=None, **kwargs):\n \"\"\"pad array and adjust affine transform matrix.\n\n Parameters\n ----------\n array: ndarray\n Numpy ndarray, for best results a 2D array\n transform: Affine transform\n transform object mapping pixel space to coordinates\n pad_width: int\n number of pixels to pad array on all four\n mode: str or function\n define the method for determining padded values\n\n Returns\n -------\n (array, transform): tuple\n Tuple of new array and affine transform\n\n Notes\n -----\n See numpy docs for details on mode and other kwargs:\n http://docs.scipy.org/doc/numpy-1.10.0/reference/generated/numpy.pad.html\n \"\"\"\n import numpy as np\n transform = guard_transform(transform)\n padded_array = np.pad(array, pad_width, mode, **kwargs)\n padded_trans = list(transform)\n padded_trans[2] -= pad_width * padded_trans[0]\n padded_trans[5] -= pad_width * padded_trans[4]\n return padded_array, Affine(*padded_trans[:6])\n\n\ndef get_data_window(arr, nodata=None):\n warnings.warn(\"Deprecated; Use rasterio.windows instead\", DeprecationWarning)\n return windows.get_data_window(arr, nodata)\n\n\ndef window_union(data):\n warnings.warn(\"Deprecated; Use rasterio.windows instead\", DeprecationWarning)\n return windows.union(data)\n\n\ndef window_intersection(data):\n warnings.warn(\"Deprecated; Use rasterio.windows instead\", DeprecationWarning)\n return windows.intersection(data)\n\ndef windows_intersect(data):\n warnings.warn(\"Deprecated; Use rasterio.windows instead\", DeprecationWarning)\n return windows.intersect(data)\n",
"path": "rasterio/__init__.py"
}
] | [
{
"content": "\"\"\"Rasterio\"\"\"\n\nfrom __future__ import absolute_import\n\nfrom collections import namedtuple\nimport logging\ntry:\n from logging import NullHandler\nexcept ImportError: # pragma: no cover\n class NullHandler(logging.Handler):\n def emit(self, record):\n pass\nimport warnings\n\nfrom rasterio._base import (\n eval_window, window_shape, window_index, gdal_version)\nfrom rasterio.dtypes import (\n bool_, ubyte, uint8, uint16, int16, uint32, int32, float32, float64,\n complex_, check_dtype)\nfrom rasterio.env import ensure_env, Env\nfrom rasterio.compat import string_types\nfrom rasterio.profiles import default_gtiff_profile\nfrom rasterio.transform import Affine, guard_transform\nfrom rasterio.vfs import parse_path\nfrom rasterio import windows\n\n# These modules are imported from the Cython extensions, but are also import\n# here to help tools like cx_Freeze find them automatically\nfrom rasterio import _err, coords, enums, vfs\n\n# Classes in rasterio._io are imported below just before we need them.\n\n__all__ = [\n 'band', 'open', 'copy', 'pad']\n__version__ = \"1.0.dev1\"\n__gdal_version__ = gdal_version()\n\n# Rasterio attaches NullHandler to the 'rasterio' logger and its\n# descendents. See\n# https://docs.python.org/2/howto/logging.html#configuring-logging-for-a-library\n# Applications must attach their own handlers in order to see messages.\n# See rasterio/rio/main.py for an example.\nlog = logging.getLogger(__name__)\nlog.addHandler(NullHandler())\n\n\n@ensure_env\ndef open(path, mode='r', driver=None, width=None, height=None,\n count=None, crs=None, transform=None, dtype=None, nodata=None,\n **kwargs):\n \"\"\"Open file at ``path`` in ``mode`` 'r' (read), 'r+' (read and\n write), or 'w' (write) and return a dataset Reader or Updater\n object.\n\n In write mode, a driver name such as \"GTiff\" or \"JPEG\" (see GDAL\n docs or ``gdal_translate --help`` on the command line),\n ``width`` (number of pixels per line) and ``height`` (number of\n lines), the ``count`` number of bands in the new file must be\n specified. Additionally, the data type for bands such as\n ``rasterio.ubyte`` for 8-bit bands or ``rasterio.uint16`` for\n 16-bit bands must be specified using the ``dtype`` argument.\n\n Parameters\n ----------\n mode: string\n \"r\" (read), \"r+\" (read/write), or \"w\" (write)\n driver: string\n driver code specifying the format name (e.g. \"GTiff\" or\n \"JPEG\"). See GDAL docs at\n http://www.gdal.org/formats_list.html (optional, required\n for writing).\n width: int\n number of pixels per line\n (optional, required for write)\n height: int\n number of lines\n (optional, required for write)\n count: int > 0\n number of bands\n (optional, required for write)\n dtype: rasterio.dtype\n the data type for bands such as ``rasterio.ubyte`` for\n 8-bit bands or ``rasterio.uint16`` for 16-bit bands\n (optional, required for write)\n crs: dict or string\n Coordinate reference system\n (optional, recommended for write)\n transform: Affine instance\n Affine transformation mapping the pixel space to geographic\n space (optional, recommended for writing).\n nodata: number\n Defines pixel value to be interpreted as null/nodata\n (optional, recommended for write)\n\n Returns\n -------\n A ``DatasetReader`` or ``DatasetUpdater`` object.\n\n Notes\n -----\n In write mode, you must specify at least ``width``, ``height``,\n ``count`` and ``dtype``.\n\n A coordinate reference system for raster datasets in write mode\n can be defined by the ``crs`` argument. 
It takes Proj4 style\n mappings like\n\n .. code::\n\n {'proj': 'longlat', 'ellps': 'WGS84', 'datum': 'WGS84',\n 'no_defs': True}\n\n An affine transformation that maps ``col,row`` pixel coordinates\n to ``x,y`` coordinates in the coordinate reference system can be\n specified using the ``transform`` argument. The value should be\n an instance of ``affine.Affine``\n\n .. code:: python\n\n >>> from affine import Affine\n >>> transform = Affine(0.5, 0.0, -180.0, 0.0, -0.5, 90.0)\n\n These coefficients are shown in the figure below.\n\n .. code::\n\n | x | | a b c | | c |\n | y | = | d e f | | r |\n | 1 | | 0 0 1 | | 1 |\n\n a: rate of change of X with respect to increasing column,\n i.e. pixel width\n b: rotation, 0 if the raster is oriented \"north up\"\n c: X coordinate of the top left corner of the top left pixel\n d: rotation, 0 if the raster is oriented \"north up\"\n e: rate of change of Y with respect to increasing row,\n usually a negative number (i.e. -1 * pixel height) if\n north-up.\n f: Y coordinate of the top left corner of the top left pixel\n\n A 6-element sequence of the affine transformation matrix\n coefficients in ``c, a, b, f, d, e`` order, (i.e. GDAL\n geotransform order) will be accepted until 1.0 (deprecated).\n\n A virtual filesystem can be specified. The ``vfs`` parameter may\n be an Apache Commons VFS style string beginning with \"zip://\" or\n \"tar://\"\". In this case, the ``path`` must be an absolute path\n within that container.\n\n \"\"\"\n if not isinstance(path, string_types):\n raise TypeError(\"invalid path: {0!r}\".format(path))\n if mode and not isinstance(mode, string_types):\n raise TypeError(\"invalid mode: {0!r}\".format(mode))\n if driver and not isinstance(driver, string_types):\n raise TypeError(\"invalid driver: {0!r}\".format(driver))\n if dtype and not check_dtype(dtype):\n raise TypeError(\"invalid dtype: {0!r}\".format(dtype))\n if transform:\n transform = guard_transform(transform)\n elif 'affine' in kwargs:\n affine = kwargs.pop('affine')\n transform = guard_transform(affine)\n\n # Get AWS credentials if we're attempting to access a raster\n # on S3.\n pth, archive, scheme = parse_path(path)\n if scheme == 's3':\n Env().get_aws_credentials()\n log.debug(\"AWS credentials have been obtained\")\n\n # Create dataset instances and pass the given env, which will\n # be taken over by the dataset's context manager if it is not\n # None.\n if mode == 'r':\n from rasterio._io import RasterReader\n s = RasterReader(path)\n elif mode == 'r+':\n from rasterio._io import writer\n s = writer(path, mode)\n elif mode == 'r-':\n from rasterio._base import DatasetReader\n s = DatasetReader(path)\n elif mode == 'w':\n from rasterio._io import writer\n s = writer(path, mode, driver=driver,\n width=width, height=height, count=count,\n crs=crs, transform=transform, dtype=dtype,\n nodata=nodata, **kwargs)\n else:\n raise ValueError(\n \"mode string must be one of 'r', 'r+', or 'w', not %s\" % mode)\n s.start()\n return s\n\n\n@ensure_env\ndef copy(src, dst, **kw):\n \"\"\"Copy a source raster to a new destination with driver specific\n creation options.\n\n Parameters\n ----------\n src: string\n an existing raster file\n dst: string\n valid path to output file.\n\n Returns\n -------\n None\n\n Raises\n ------\n ValueError:\n If source path is not a valid Dataset\n\n Notes\n -----\n A ``driver`` keyword argument with value like 'GTiff' or 'JPEG' is\n used to control the output format.\n\n This is the one way to create write-once files like JPEGs.\n \"\"\"\n from 
rasterio._copy import RasterCopier\n return RasterCopier()(src, dst, **kw)\n\n\ndef drivers(**kwargs):\n \"\"\"Create a gdal environment with registered drivers and creation\n options.\n\n This function is deprecated; please use ``env.Env`` instead.\n\n Parameters\n ----------\n **kwargs:: keyword arguments\n Configuration options that define GDAL driver behavior\n\n See https://trac.osgeo.org/gdal/wiki/ConfigOptions\n\n Returns\n -------\n GDALEnv responsible for managing the environment.\n\n Notes\n -----\n Use as a context manager, ``with rasterio.drivers(): ...``\n \"\"\"\n warnings.warn(\"Deprecated; Use env.Env instead\", DeprecationWarning)\n return Env(**kwargs)\n\n\nBand = namedtuple('Band', ['ds', 'bidx', 'dtype', 'shape'])\n\ndef band(ds, bidx):\n \"\"\"Wraps a dataset and a band index up as a 'Band'\n\n Parameters\n ----------\n ds: rasterio.RasterReader\n Open rasterio dataset\n bidx: int\n Band number, index starting at 1\n\n Returns\n -------\n a rasterio.Band\n \"\"\"\n return Band(\n ds,\n bidx,\n set(ds.dtypes).pop(),\n ds.shape)\n\n\ndef pad(array, transform, pad_width, mode=None, **kwargs):\n \"\"\"pad array and adjust affine transform matrix.\n\n Parameters\n ----------\n array: ndarray\n Numpy ndarray, for best results a 2D array\n transform: Affine transform\n transform object mapping pixel space to coordinates\n pad_width: int\n number of pixels to pad array on all four\n mode: str or function\n define the method for determining padded values\n\n Returns\n -------\n (array, transform): tuple\n Tuple of new array and affine transform\n\n Notes\n -----\n See numpy docs for details on mode and other kwargs:\n http://docs.scipy.org/doc/numpy-1.10.0/reference/generated/numpy.pad.html\n \"\"\"\n import numpy as np\n transform = guard_transform(transform)\n padded_array = np.pad(array, pad_width, mode, **kwargs)\n padded_trans = list(transform)\n padded_trans[2] -= pad_width * padded_trans[0]\n padded_trans[5] -= pad_width * padded_trans[4]\n return padded_array, Affine(*padded_trans[:6])\n\n\ndef get_data_window(arr, nodata=None):\n warnings.warn(\"Deprecated; Use rasterio.windows instead\", DeprecationWarning)\n return windows.get_data_window(arr, nodata)\n\n\ndef window_union(data):\n warnings.warn(\"Deprecated; Use rasterio.windows instead\", DeprecationWarning)\n return windows.union(data)\n\n\ndef window_intersection(data):\n warnings.warn(\"Deprecated; Use rasterio.windows instead\", DeprecationWarning)\n return windows.intersection(data)\n\ndef windows_intersect(data):\n warnings.warn(\"Deprecated; Use rasterio.windows instead\", DeprecationWarning)\n return windows.intersect(data)\n",
"path": "rasterio/__init__.py"
}
] | diff --git a/rasterio/__init__.py b/rasterio/__init__.py
index 556e5c754..bb30387bc 100644
--- a/rasterio/__init__.py
+++ b/rasterio/__init__.py
@@ -32,7 +32,7 @@ def emit(self, record):
__all__ = [
'band', 'open', 'copy', 'pad']
-__version__ = "0.36.0"
+__version__ = "1.0.dev1"
__gdal_version__ = gdal_version()
# Rasterio attaches NullHandler to the 'rasterio' logger and its
|
searx__searx-1483 | wolframalpha engine is broken
The wolframalpha engine appears to be broken in 0.15.0
```
searx-run[9330]: ERROR:searx.search:engine wolframalpha : exception : Unicode strings with encoding declaration are not supported. Please use bytes input or XML fragments without declaration.
searx-run[9330]: Traceback (most recent call last):
searx-run[9330]: File "/nix/store/rf8v47ispmh7bp0rbl291ml1fivfs424-searx-0.15.0/lib/python3.6/site-packages/searx/search.py", line 104, in search_one_request_safe
searx-run[9330]: search_results = search_one_request(engine, query, request_params)
searx-run[9330]: File "/nix/store/rf8v47ispmh7bp0rbl291ml1fivfs424-searx-0.15.0/lib/python3.6/site-packages/searx/search.py", line 87, in search_one_request
searx-run[9330]: return engine.response(response)
searx-run[9330]: File "/nix/store/rf8v47ispmh7bp0rbl291ml1fivfs424-searx-0.15.0/lib/python3.6/site-packages/searx/engines/wolframalpha_api.py", line 68, in response
searx-run[9330]: search_results = etree.XML(resp.text)
searx-run[9330]: File "src/lxml/etree.pyx", line 3192, in lxml.etree.XML
searx-run[9330]: File "src/lxml/parser.pxi", line 1872, in lxml.etree._parseMemoryDocument
searx-run[9330]: ValueError: Unicode strings with encoding declaration are not supported. Please use bytes input or XML fragments without declaration.
```
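For context, the failure is reproducible with lxml alone; a minimal sketch (standalone, not searx code) showing that `etree.XML` rejects a `str` carrying an XML encoding declaration, while the same document passed as `bytes` parses fine:
```python
from lxml import etree

xml = "<?xml version='1.0' encoding='UTF-8'?><queryresult success='false'/>"

try:
    etree.XML(xml)  # str with an encoding declaration
except ValueError as err:
    print(err)  # Unicode strings with encoding declaration are not supported...

root = etree.XML(xml.encode("utf-8"))  # bytes input parses fine
print(root.tag)  # queryresult
```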
| [
{
"content": "# Wolfram Alpha (Science)\n#\n# @website https://www.wolframalpha.com\n# @provide-api yes (https://api.wolframalpha.com/v2/)\n#\n# @using-api yes\n# @results XML\n# @stable yes\n# @parse url, infobox\n\nfrom lxml import etree\nfrom searx.url_utils import urlencode\n\n# search-url\nsearch_url = 'https://api.wolframalpha.com/v2/query?appid={api_key}&{query}'\nsite_url = 'https://www.wolframalpha.com/input/?{query}'\napi_key = '' # defined in settings.yml\n\n# xpath variables\nfailure_xpath = '/queryresult[attribute::success=\"false\"]'\ninput_xpath = '//pod[starts-with(attribute::id, \"Input\")]/subpod/plaintext'\npods_xpath = '//pod'\nsubpods_xpath = './subpod'\npod_primary_xpath = './@primary'\npod_id_xpath = './@id'\npod_title_xpath = './@title'\nplaintext_xpath = './plaintext'\nimage_xpath = './img'\nimg_src_xpath = './@src'\nimg_alt_xpath = './@alt'\n\n# pods to display as image in infobox\n# this pods do return a plaintext, but they look better and are more useful as images\nimage_pods = {'VisualRepresentation',\n 'Illustration'}\n\n\n# do search-request\ndef request(query, params):\n params['url'] = search_url.format(query=urlencode({'input': query}), api_key=api_key)\n params['headers']['Referer'] = site_url.format(query=urlencode({'i': query}))\n\n return params\n\n\n# replace private user area characters to make text legible\ndef replace_pua_chars(text):\n pua_chars = {u'\\uf522': u'\\u2192', # rigth arrow\n u'\\uf7b1': u'\\u2115', # set of natural numbers\n u'\\uf7b4': u'\\u211a', # set of rational numbers\n u'\\uf7b5': u'\\u211d', # set of real numbers\n u'\\uf7bd': u'\\u2124', # set of integer numbers\n u'\\uf74c': 'd', # differential\n u'\\uf74d': u'\\u212f', # euler's number\n u'\\uf74e': 'i', # imaginary number\n u'\\uf7d9': '='} # equals sign\n\n for k, v in pua_chars.items():\n text = text.replace(k, v)\n\n return text\n\n\n# get response from search-request\ndef response(resp):\n results = []\n\n search_results = etree.XML(resp.text)\n\n # return empty array if there are no results\n if search_results.xpath(failure_xpath):\n return []\n\n try:\n infobox_title = search_results.xpath(input_xpath)[0].text\n except:\n infobox_title = \"\"\n\n pods = search_results.xpath(pods_xpath)\n result_chunks = []\n result_content = \"\"\n for pod in pods:\n pod_id = pod.xpath(pod_id_xpath)[0]\n pod_title = pod.xpath(pod_title_xpath)[0]\n pod_is_result = pod.xpath(pod_primary_xpath)\n\n subpods = pod.xpath(subpods_xpath)\n if not subpods:\n continue\n\n # Appends either a text or an image, depending on which one is more suitable\n for subpod in subpods:\n content = subpod.xpath(plaintext_xpath)[0].text\n image = subpod.xpath(image_xpath)\n\n if content and pod_id not in image_pods:\n\n if pod_is_result or not result_content:\n if pod_id != \"Input\":\n result_content = \"%s: %s\" % (pod_title, content)\n\n # if no input pod was found, title is first plaintext pod\n if not infobox_title:\n infobox_title = content\n\n content = replace_pua_chars(content)\n result_chunks.append({'label': pod_title, 'value': content})\n\n elif image:\n result_chunks.append({'label': pod_title,\n 'image': {'src': image[0].xpath(img_src_xpath)[0],\n 'alt': image[0].xpath(img_alt_xpath)[0]}})\n\n if not result_chunks:\n return []\n\n title = \"Wolfram|Alpha (%s)\" % infobox_title\n\n # append infobox\n results.append({'infobox': infobox_title,\n 'attributes': result_chunks,\n 'urls': [{'title': 'Wolfram|Alpha', 'url': resp.request.headers['Referer']}]})\n\n # append link to site\n 
results.append({'url': resp.request.headers['Referer'],\n 'title': title,\n 'content': result_content})\n\n return results\n",
"path": "searx/engines/wolframalpha_api.py"
}
] | [
{
"content": "# Wolfram Alpha (Science)\n#\n# @website https://www.wolframalpha.com\n# @provide-api yes (https://api.wolframalpha.com/v2/)\n#\n# @using-api yes\n# @results XML\n# @stable yes\n# @parse url, infobox\n\nfrom lxml import etree\nfrom searx.url_utils import urlencode\n\n# search-url\nsearch_url = 'https://api.wolframalpha.com/v2/query?appid={api_key}&{query}'\nsite_url = 'https://www.wolframalpha.com/input/?{query}'\napi_key = '' # defined in settings.yml\n\n# xpath variables\nfailure_xpath = '/queryresult[attribute::success=\"false\"]'\ninput_xpath = '//pod[starts-with(attribute::id, \"Input\")]/subpod/plaintext'\npods_xpath = '//pod'\nsubpods_xpath = './subpod'\npod_primary_xpath = './@primary'\npod_id_xpath = './@id'\npod_title_xpath = './@title'\nplaintext_xpath = './plaintext'\nimage_xpath = './img'\nimg_src_xpath = './@src'\nimg_alt_xpath = './@alt'\n\n# pods to display as image in infobox\n# this pods do return a plaintext, but they look better and are more useful as images\nimage_pods = {'VisualRepresentation',\n 'Illustration'}\n\n\n# do search-request\ndef request(query, params):\n params['url'] = search_url.format(query=urlencode({'input': query}), api_key=api_key)\n params['headers']['Referer'] = site_url.format(query=urlencode({'i': query}))\n\n return params\n\n\n# replace private user area characters to make text legible\ndef replace_pua_chars(text):\n pua_chars = {u'\\uf522': u'\\u2192', # rigth arrow\n u'\\uf7b1': u'\\u2115', # set of natural numbers\n u'\\uf7b4': u'\\u211a', # set of rational numbers\n u'\\uf7b5': u'\\u211d', # set of real numbers\n u'\\uf7bd': u'\\u2124', # set of integer numbers\n u'\\uf74c': 'd', # differential\n u'\\uf74d': u'\\u212f', # euler's number\n u'\\uf74e': 'i', # imaginary number\n u'\\uf7d9': '='} # equals sign\n\n for k, v in pua_chars.items():\n text = text.replace(k, v)\n\n return text\n\n\n# get response from search-request\ndef response(resp):\n results = []\n\n search_results = etree.XML(resp.content)\n\n # return empty array if there are no results\n if search_results.xpath(failure_xpath):\n return []\n\n try:\n infobox_title = search_results.xpath(input_xpath)[0].text\n except:\n infobox_title = \"\"\n\n pods = search_results.xpath(pods_xpath)\n result_chunks = []\n result_content = \"\"\n for pod in pods:\n pod_id = pod.xpath(pod_id_xpath)[0]\n pod_title = pod.xpath(pod_title_xpath)[0]\n pod_is_result = pod.xpath(pod_primary_xpath)\n\n subpods = pod.xpath(subpods_xpath)\n if not subpods:\n continue\n\n # Appends either a text or an image, depending on which one is more suitable\n for subpod in subpods:\n content = subpod.xpath(plaintext_xpath)[0].text\n image = subpod.xpath(image_xpath)\n\n if content and pod_id not in image_pods:\n\n if pod_is_result or not result_content:\n if pod_id != \"Input\":\n result_content = \"%s: %s\" % (pod_title, content)\n\n # if no input pod was found, title is first plaintext pod\n if not infobox_title:\n infobox_title = content\n\n content = replace_pua_chars(content)\n result_chunks.append({'label': pod_title, 'value': content})\n\n elif image:\n result_chunks.append({'label': pod_title,\n 'image': {'src': image[0].xpath(img_src_xpath)[0],\n 'alt': image[0].xpath(img_alt_xpath)[0]}})\n\n if not result_chunks:\n return []\n\n title = \"Wolfram|Alpha (%s)\" % infobox_title\n\n # append infobox\n results.append({'infobox': infobox_title,\n 'attributes': result_chunks,\n 'urls': [{'title': 'Wolfram|Alpha', 'url': resp.request.headers['Referer']}]})\n\n # append link to site\n 
results.append({'url': resp.request.headers['Referer'],\n 'title': title,\n 'content': result_content})\n\n return results\n",
"path": "searx/engines/wolframalpha_api.py"
}
] | diff --git a/searx/engines/wolframalpha_api.py b/searx/engines/wolframalpha_api.py
index 595c6b7de3..1c58c4a9b7 100644
--- a/searx/engines/wolframalpha_api.py
+++ b/searx/engines/wolframalpha_api.py
@@ -65,7 +65,7 @@ def replace_pua_chars(text):
def response(resp):
results = []
- search_results = etree.XML(resp.text)
+ search_results = etree.XML(resp.content)
# return empty array if there are no results
if search_results.xpath(failure_xpath):
diff --git a/tests/unit/engines/test_wolframalpha_api.py b/tests/unit/engines/test_wolframalpha_api.py
index 30d3376457..0433b34aab 100644
--- a/tests/unit/engines/test_wolframalpha_api.py
+++ b/tests/unit/engines/test_wolframalpha_api.py
@@ -35,7 +35,7 @@ def test_response(self):
xml = '''<?xml version='1.0' encoding='UTF-8'?>
<queryresult success='false' error='false' />
'''
- response = mock.Mock(text=xml.encode('utf-8'))
+ response = mock.Mock(content=xml.encode('utf-8'))
self.assertEqual(wolframalpha_api.response(response), [])
# test basic case
@@ -83,7 +83,7 @@ def test_response(self):
</pod>
</queryresult>
"""
- response = mock.Mock(text=xml, request=request)
+ response = mock.Mock(content=xml, request=request)
results = wolframalpha_api.response(response)
self.assertEqual(type(results), list)
self.assertEqual(len(results), 2)
@@ -144,7 +144,7 @@ def test_response(self):
</pod>
</queryresult>
"""
- response = mock.Mock(text=xml, request=request)
+ response = mock.Mock(content=xml, request=request)
results = wolframalpha_api.response(response)
self.assertEqual(type(results), list)
self.assertEqual(len(results), 2)
|
nautobot__nautobot-2575 | Napalm Configuration Does Not Match Documentation
### Environment
* Nautobot version (Docker tag too if applicable): 1.4.X
* Python version: 3.8
* Database platform, version: NA
* Middleware(s): NA
### Steps to Reproduce
1. Deploy Nautobot with the NAPALM username, password, and timeout set via environment variables as specified [here](https://docs.nautobot.com/projects/core/en/stable/configuration/optional-settings/#napalm_username)
2. Attempt to use Napalm with configured ENV credentials
### Expected Behavior
The three settings are loaded into `django.conf.settings`.
### Observed Behavior
The three settings inherit their hard-coded defaults and must instead be specified in `nautobot_config.py`, caused by [this](https://github.com/nautobot/nautobot/blob/v1.4.5/nautobot/core/settings.py#L84-L86).
May be related to #2393
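For illustration, the env-var pattern already used elsewhere in `nautobot/core/settings.py` could cover these settings too; a hedged sketch (the `NAUTOBOT_NAPALM_*` names follow Nautobot's documented naming convention, but verify them against the docs before relying on them):
```python
import os

# Sketch only: read the NAPALM settings from the environment, keeping the
# current hard-coded values as fallbacks.
NAPALM_USERNAME = os.getenv("NAUTOBOT_NAPALM_USERNAME", "")
NAPALM_PASSWORD = os.getenv("NAUTOBOT_NAPALM_PASSWORD", "")
NAPALM_TIMEOUT = int(os.getenv("NAUTOBOT_NAPALM_TIMEOUT", "30"))
```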
| [
{
"content": "import os\nimport platform\nimport re\n\nfrom django.contrib.messages import constants as messages\nimport django.forms\n\nfrom nautobot import __version__\nfrom nautobot.core.settings_funcs import is_truthy, parse_redis_connection # noqa: F401\n\n#\n# Environment setup\n#\n\n# This is used for display in the UI.\nVERSION = __version__\n\n# Hostname of the system. This is displayed in the web UI footers along with the\n# version.\nHOSTNAME = platform.node()\n\n# Set the base directory two levels up (i.e. the base nautobot/ directory)\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\n# Set the swapable User model to the Nautobot custom User model\nAUTH_USER_MODEL = \"users.User\"\n\n# Set the default AutoField for 3rd party apps\n# N.B. Ideally this would be a `UUIDField`, but due to Django restrictions\n# we can’t do that yet\nDEFAULT_AUTO_FIELD = \"django.db.models.BigAutoField\"\n\n\n###############################################################\n# NAUTOBOT - Settings for Nautobot internals/plugins/defaults #\n###############################################################\n\n#\n# Nautobot optional settings/defaults\n#\nALLOWED_URL_SCHEMES = (\n \"file\",\n \"ftp\",\n \"ftps\",\n \"http\",\n \"https\",\n \"irc\",\n \"mailto\",\n \"sftp\",\n \"ssh\",\n \"tel\",\n \"telnet\",\n \"tftp\",\n \"vnc\",\n \"xmpp\",\n)\n\n# Base directory wherein all created files (jobs, git repositories, file uploads, static files) will be stored)\nNAUTOBOT_ROOT = os.getenv(\"NAUTOBOT_ROOT\", os.path.expanduser(\"~/.nautobot\"))\n\n# By default, Nautobot will permit users to create duplicate prefixes and IP addresses in the global\n# table (that is, those which are not assigned to any VRF). This behavior can be disabled by setting\n# ENFORCE_GLOBAL_UNIQUE to True.\nENFORCE_GLOBAL_UNIQUE = False\n\n# Exclude potentially sensitive models from wildcard view exemption. 
These may still be exempted\n# by specifying the model individually in the EXEMPT_VIEW_PERMISSIONS configuration parameter.\nEXEMPT_EXCLUDE_MODELS = (\n (\"auth\", \"group\"),\n (\"users\", \"user\"),\n (\"users\", \"objectpermission\"),\n)\n\nEXEMPT_VIEW_PERMISSIONS = []\nGIT_ROOT = os.getenv(\"NAUTOBOT_GIT_ROOT\", os.path.join(NAUTOBOT_ROOT, \"git\").rstrip(\"/\"))\nHTTP_PROXIES = None\nJOBS_ROOT = os.getenv(\"NAUTOBOT_JOBS_ROOT\", os.path.join(NAUTOBOT_ROOT, \"jobs\").rstrip(\"/\"))\nMAINTENANCE_MODE = False\n# Metrics\nMETRICS_ENABLED = False\n\n# Napalm\nNAPALM_ARGS = {}\nNAPALM_PASSWORD = \"\"\nNAPALM_TIMEOUT = 30\nNAPALM_USERNAME = \"\"\n\n# Plugins\nPLUGINS = []\nPLUGINS_CONFIG = {}\n\n# Global 3rd-party authentication settings\nEXTERNAL_AUTH_DEFAULT_GROUPS = []\nEXTERNAL_AUTH_DEFAULT_PERMISSIONS = {}\n\n# Remote auth backend settings\nREMOTE_AUTH_AUTO_CREATE_USER = False\nREMOTE_AUTH_HEADER = \"HTTP_REMOTE_USER\"\n\n# SSO backend settings https://python-social-auth.readthedocs.io/en/latest/configuration/settings.html\nSOCIAL_AUTH_POSTGRES_JSONFIELD = False\n# Nautobot related - May be overridden if using custom social auth backend\nSOCIAL_AUTH_BACKEND_PREFIX = \"social_core.backends\"\n\n# Job log entry sanitization and similar\nSANITIZER_PATTERNS = [\n # General removal of username-like and password-like tokens\n (re.compile(r\"(https?://)?\\S+\\s*@\", re.IGNORECASE), r\"\\1{replacement}@\"),\n (re.compile(r\"(username|password|passwd|pwd)(\\s*i?s?\\s*:?\\s*)?\\S+\", re.IGNORECASE), r\"\\1\\2{replacement}\"),\n]\n\n# Storage\nSTORAGE_BACKEND = None\nSTORAGE_CONFIG = {}\n\n# Test runner that is aware of our use of \"integration\" tags and only runs\n# integration tests if explicitly passed in with `nautobot-server test --tag integration`.\nTEST_RUNNER = \"nautobot.core.tests.runner.NautobotTestRunner\"\n\n#\n# Django cryptography\n#\n\n# CRYPTOGRAPHY_BACKEND = cryptography.hazmat.backends.default_backend()\n# CRYPTOGRAPHY_DIGEST = cryptography.hazmat.primitives.hashes.SHA256\nCRYPTOGRAPHY_KEY = None # Defaults to SECRET_KEY if unset\nCRYPTOGRAPHY_SALT = \"nautobot-cryptography\"\n\n\n#\n# Django Prometheus\n#\n\nPROMETHEUS_EXPORT_MIGRATIONS = False\n\n\n#\n# Django filters\n#\n\nFILTERS_NULL_CHOICE_LABEL = \"None\"\nFILTERS_NULL_CHOICE_VALUE = \"null\"\n\nSTRICT_FILTERING = True\n\n#\n# Django REST framework (API)\n#\n\nREST_FRAMEWORK_VERSION = VERSION.rsplit(\".\", 1)[0] # Use major.minor as API version\ncurrent_major, current_minor = REST_FRAMEWORK_VERSION.split(\".\")\n# We support all major.minor API versions from 1.2 to the present latest version.\n# This will need to be elaborated upon when we move to version 2.0\n# Similar logic exists in tasks.py, please keep them in sync!\nassert current_major == \"1\", f\"REST_FRAMEWORK_ALLOWED_VERSIONS needs to be updated to handle version {current_major}\"\nREST_FRAMEWORK_ALLOWED_VERSIONS = [f\"{current_major}.{minor}\" for minor in range(2, int(current_minor) + 1)]\n\nREST_FRAMEWORK = {\n \"ALLOWED_VERSIONS\": REST_FRAMEWORK_ALLOWED_VERSIONS,\n \"DEFAULT_AUTHENTICATION_CLASSES\": (\n \"rest_framework.authentication.SessionAuthentication\",\n \"nautobot.core.api.authentication.TokenAuthentication\",\n ),\n \"DEFAULT_FILTER_BACKENDS\": (\"nautobot.core.api.filter_backends.NautobotFilterBackend\",),\n \"DEFAULT_METADATA_CLASS\": \"nautobot.core.api.metadata.BulkOperationMetadata\",\n \"DEFAULT_PAGINATION_CLASS\": \"nautobot.core.api.pagination.OptionalLimitOffsetPagination\",\n \"DEFAULT_PERMISSION_CLASSES\": 
(\"nautobot.core.api.authentication.TokenPermissions\",),\n \"DEFAULT_RENDERER_CLASSES\": (\n \"rest_framework.renderers.JSONRenderer\",\n \"nautobot.core.api.renderers.FormlessBrowsableAPIRenderer\",\n ),\n \"DEFAULT_SCHEMA_CLASS\": \"nautobot.core.api.schema.NautobotAutoSchema\",\n # Version to use if the client doesn't request otherwise.\n # This should only change (if at all) with Nautobot major (breaking) releases.\n \"DEFAULT_VERSION\": \"1.2\",\n \"DEFAULT_VERSIONING_CLASS\": \"nautobot.core.api.versioning.NautobotAPIVersioning\",\n \"PAGE_SIZE\": None,\n \"SCHEMA_COERCE_METHOD_NAMES\": {\n # Default mappings\n \"retrieve\": \"read\",\n \"destroy\": \"delete\",\n # Custom operations\n \"bulk_destroy\": \"bulk_delete\",\n },\n \"VIEW_NAME_FUNCTION\": \"nautobot.utilities.api.get_view_name\",\n}\n\n\n#\n# drf_spectacular (OpenAPI/Swagger)\n#\n\nSPECTACULAR_SETTINGS = {\n \"TITLE\": \"API Documentation\",\n \"DESCRIPTION\": \"Source of truth and network automation platform\",\n \"LICENSE\": {\"name\": \"Apache v2 License\"},\n \"VERSION\": VERSION,\n # For a semblance of backwards-compatibility with drf-yasg / OpenAPI 2.0, where \"/api\" was a common \"basePath\"\n # in the schema.\n # OpenAPI 3.0 removes \"basePath\" in favor of \"servers\", so we now declare \"/api\" as the server relative URL and\n # trim it from all of the individual paths correspondingly.\n # See also https://github.com/nautobot/nautobot-ansible/pull/135 for an example of why this is desirable.\n \"SERVERS\": [{\"url\": \"/api\"}],\n \"SCHEMA_PATH_PREFIX\": \"/api\",\n \"SCHEMA_PATH_PREFIX_TRIM\": True,\n # use sidecar - locally packaged UI files, not CDN\n \"SWAGGER_UI_DIST\": \"SIDECAR\",\n \"SWAGGER_UI_FAVICON_HREF\": \"SIDECAR\",\n \"REDOC_DIST\": \"SIDECAR\",\n \"ENUM_NAME_OVERRIDES\": {\n # These choice enums need to be overridden because they get assigned to the `type` field and\n # result in this error:\n # enum naming encountered a non-optimally resolvable collision for fields named \"type\".\n \"CableTypeChoices\": \"nautobot.dcim.choices.CableTypeChoices\",\n \"ConsolePortTypeChoices\": \"nautobot.dcim.choices.ConsolePortTypeChoices\",\n \"CustomFieldTypeChoices\": \"nautobot.extras.choices.CustomFieldTypeChoices\",\n \"InterfaceTypeChoices\": \"nautobot.dcim.choices.InterfaceTypeChoices\",\n \"PortTypeChoices\": \"nautobot.dcim.choices.PortTypeChoices\",\n \"PowerFeedTypeChoices\": \"nautobot.dcim.choices.PowerFeedTypeChoices\",\n \"PowerOutletTypeChoices\": \"nautobot.dcim.choices.PowerOutletTypeChoices\",\n \"PowerPortTypeChoices\": \"nautobot.dcim.choices.PowerPortTypeChoices\",\n \"RackTypeChoices\": \"nautobot.dcim.choices.RackTypeChoices\",\n \"RelationshipTypeChoices\": \"nautobot.extras.choices.RelationshipTypeChoices\",\n # Each of these StatusModels has bulk and non-bulk serializers, with the same status options,\n # which confounds drf-spectacular's automatic naming of enums, resulting in the below warning:\n # enum naming encountered a non-optimally resolvable collision for fields named \"status\"\n # By explicitly naming the enums ourselves we avoid this warning.\n \"CableStatusChoices\": \"nautobot.dcim.api.serializers.CableSerializer.status_choices\",\n \"CircuitStatusChoices\": \"nautobot.circuits.api.serializers.CircuitSerializer.status_choices\",\n \"DeviceStatusChoices\": \"nautobot.dcim.api.serializers.DeviceWithConfigContextSerializer.status_choices\",\n \"InterfaceStatusChoices\": \"nautobot.dcim.api.serializers.InterfaceSerializer.status_choices\",\n \"IPAddressStatusChoices\": 
\"nautobot.ipam.api.serializers.IPAddressSerializer.status_choices\",\n \"LocationStatusChoices\": \"nautobot.dcim.api.serializers.LocationSerializer.status_choices\",\n \"PowerFeedStatusChoices\": \"nautobot.dcim.api.serializers.PowerFeedSerializer.status_choices\",\n \"PrefixStatusChoices\": \"nautobot.ipam.api.serializers.PrefixSerializer.status_choices\",\n \"RackStatusChoices\": \"nautobot.dcim.api.serializers.RackSerializer.status_choices\",\n \"VirtualMachineStatusChoices\": \"nautobot.virtualization.api.serializers.VirtualMachineWithConfigContextSerializer.status_choices\",\n \"VLANStatusChoices\": \"nautobot.ipam.api.serializers.VLANSerializer.status_choices\",\n },\n # Create separate schema components for PATCH requests (fields generally are not `required` on PATCH)\n \"COMPONENT_SPLIT_PATCH\": True,\n # Create separate schema components for request vs response where appropriate\n \"COMPONENT_SPLIT_REQUEST\": True,\n}\n\n\n##############################################\n# DJANGO - Core settings required for Django #\n##############################################\n\n#\n# Databases\n#\n\n# Only PostgresSQL is supported, so database driver is hard-coded. This can\n# still be overloaded in custom settings.\n# https://docs.djangoproject.com/en/stable/ref/settings/#databases\nDATABASES = {\n \"default\": {\n \"NAME\": os.getenv(\"NAUTOBOT_DATABASE\", \"nautobot\"),\n \"USER\": os.getenv(\"NAUTOBOT_USER\", \"\"),\n \"PASSWORD\": os.getenv(\"NAUTOBOT_PASSWORD\", \"\"),\n \"HOST\": os.getenv(\"NAUTOBOT_DB_HOST\", \"localhost\"),\n \"PORT\": os.getenv(\"NAUTOBOT_DB_PORT\", \"\"),\n \"CONN_MAX_AGE\": int(os.getenv(\"NAUTOBOT_DB_TIMEOUT\", \"300\")),\n \"ENGINE\": os.getenv(\"NAUTOBOT_DB_ENGINE\", \"django.db.backends.postgresql\"),\n }\n}\n\n# The secret key is used to encrypt session keys and salt passwords.\nSECRET_KEY = os.getenv(\"SECRET_KEY\")\n\n# Default overrides\nALLOWED_HOSTS = []\nCSRF_TRUSTED_ORIGINS = []\nDATETIME_FORMAT = \"N j, Y g:i a\"\nINTERNAL_IPS = (\"127.0.0.1\", \"::1\")\nFORCE_SCRIPT_NAME = None\nLOGGING = {}\nMEDIA_ROOT = os.path.join(NAUTOBOT_ROOT, \"media\").rstrip(\"/\")\nSESSION_FILE_PATH = None\nSHORT_DATE_FORMAT = \"Y-m-d\"\nSHORT_DATETIME_FORMAT = \"Y-m-d H:i\"\nTIME_FORMAT = \"g:i a\"\nTIME_ZONE = \"UTC\"\n\n# Disable importing the WSGI module before starting the server application. This is required for\n# uWSGI postfork callbacks to execute as is currently required in `nautobot.core.wsgi`.\nWEBSERVER_WARMUP = False\n\n# Installed apps and Django plugins. 
Nautobot plugins will be appended here later.\nINSTALLED_APPS = [\n \"django.contrib.auth\",\n \"django.contrib.contenttypes\",\n \"django.contrib.sessions\",\n \"django.contrib.messages\",\n \"django.contrib.staticfiles\",\n \"django.contrib.humanize\",\n \"cacheops\",\n \"corsheaders\",\n \"django_filters\",\n \"django_jinja\",\n \"django_tables2\",\n \"django_prometheus\",\n \"mptt\",\n \"social_django\",\n \"taggit\",\n \"timezone_field\",\n \"nautobot.core.apps.NautobotConstanceConfig\", # overridden form of \"constance\" AppConfig\n \"nautobot.core\",\n \"django.contrib.admin\", # Must be after `nautobot.core` for template overrides\n \"django_celery_beat\", # Must be after `nautobot.core` for template overrides\n \"rest_framework\", # Must be after `nautobot.core` for template overrides\n \"db_file_storage\",\n \"nautobot.circuits\",\n \"nautobot.dcim\",\n \"nautobot.ipam\",\n \"nautobot.extras\",\n \"nautobot.tenancy\",\n \"nautobot.users\",\n \"nautobot.utilities\",\n \"nautobot.virtualization\",\n \"django_rq\", # Must come after nautobot.extras to allow overriding management commands\n \"drf_spectacular\",\n \"drf_spectacular_sidecar\",\n \"graphene_django\",\n \"health_check\",\n \"health_check.storage\",\n \"django_extensions\",\n \"nautobot.core.apps.ConstanceDatabaseAppConfig\", # fix default_auto_field\n \"django_ajax_tables\",\n]\n\n# Middleware\nMIDDLEWARE = [\n \"django_prometheus.middleware.PrometheusBeforeMiddleware\",\n \"corsheaders.middleware.CorsMiddleware\",\n \"django.contrib.sessions.middleware.SessionMiddleware\",\n \"django.middleware.common.CommonMiddleware\",\n \"django.middleware.csrf.CsrfViewMiddleware\",\n \"django.contrib.auth.middleware.AuthenticationMiddleware\",\n \"django.contrib.messages.middleware.MessageMiddleware\",\n \"django.middleware.clickjacking.XFrameOptionsMiddleware\",\n \"django.middleware.security.SecurityMiddleware\",\n \"nautobot.core.middleware.ExceptionHandlingMiddleware\",\n \"nautobot.core.middleware.RemoteUserMiddleware\",\n \"nautobot.core.middleware.ExternalAuthMiddleware\",\n \"nautobot.core.middleware.ObjectChangeMiddleware\",\n \"django_prometheus.middleware.PrometheusAfterMiddleware\",\n]\n\nROOT_URLCONF = \"nautobot.core.urls\"\n\nTEMPLATES = [\n {\n \"NAME\": \"django\",\n \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n \"DIRS\": [],\n \"APP_DIRS\": True,\n \"OPTIONS\": {\n \"context_processors\": [\n \"django.template.context_processors.debug\",\n \"django.template.context_processors.request\",\n \"django.template.context_processors.media\",\n \"django.contrib.auth.context_processors.auth\",\n \"django.contrib.messages.context_processors.messages\",\n \"social_django.context_processors.backends\",\n \"social_django.context_processors.login_redirect\",\n \"nautobot.core.context_processors.settings\",\n \"nautobot.core.context_processors.sso_auth\",\n ],\n },\n },\n {\n \"NAME\": \"jinja\",\n \"BACKEND\": \"django_jinja.backend.Jinja2\",\n \"DIRS\": [],\n \"APP_DIRS\": False,\n \"OPTIONS\": {\n \"context_processors\": [\n \"django.template.context_processors.debug\",\n \"django.template.context_processors.request\",\n \"django.template.context_processors.media\",\n \"django.contrib.auth.context_processors.auth\",\n \"django.contrib.messages.context_processors.messages\",\n \"social_django.context_processors.backends\",\n \"social_django.context_processors.login_redirect\",\n \"nautobot.core.context_processors.settings\",\n \"nautobot.core.context_processors.sso_auth\",\n ],\n },\n },\n]\n\n# Set up 
authentication backends\nAUTHENTICATION_BACKENDS = [\n # Always check object permissions\n \"nautobot.core.authentication.ObjectPermissionBackend\",\n]\n\n# Internationalization\nLANGUAGE_CODE = \"en-us\"\nUSE_I18N = True\nUSE_TZ = True\n\n# WSGI\nWSGI_APPLICATION = \"nautobot.core.wsgi.application\"\nSECURE_PROXY_SSL_HEADER = (\"HTTP_X_FORWARDED_PROTO\", \"https\")\nUSE_X_FORWARDED_HOST = True\nX_FRAME_OPTIONS = \"DENY\"\n\n# Static files (CSS, JavaScript, Images)\nSTATIC_ROOT = os.path.join(NAUTOBOT_ROOT, \"static\")\nSTATIC_URL = \"static/\"\nSTATICFILES_DIRS = (os.path.join(BASE_DIR, \"project-static\"),)\n\n# Media\nMEDIA_URL = \"media/\"\n\n# Disable default limit of 1000 fields per request. Needed for bulk deletion of objects. (Added in Django 1.10.)\nDATA_UPLOAD_MAX_NUMBER_FIELDS = None\n\n# Messages\nMESSAGE_TAGS = {\n messages.ERROR: \"danger\",\n}\n\n# Authentication URLs\n# This is the URL route name for the login view.\nLOGIN_URL = \"login\"\n\n# This is the URL route name for the home page (index) view.\nLOGIN_REDIRECT_URL = \"home\"\n\n#\n# django-constance\n#\n\nCONSTANCE_BACKEND = \"constance.backends.database.DatabaseBackend\"\nCONSTANCE_DATABASE_PREFIX = \"constance:nautobot:\"\nCONSTANCE_IGNORE_ADMIN_VERSION_CHECK = True # avoid potential errors in a multi-node deployment\n\nCONSTANCE_ADDITIONAL_FIELDS = {\n \"per_page_defaults_field\": [\n \"nautobot.utilities.forms.fields.JSONArrayFormField\",\n {\n \"widget\": \"django.forms.TextInput\",\n \"base_field\": django.forms.IntegerField(min_value=1),\n },\n ],\n \"release_check_timeout_field\": [\n \"django.forms.IntegerField\",\n {\n \"min_value\": 3600,\n },\n ],\n \"release_check_url_field\": [\n \"django.forms.URLField\",\n {\n \"required\": False,\n },\n ],\n}\n\nCONSTANCE_CONFIG = {\n \"BANNER_BOTTOM\": [\n \"\",\n \"Custom HTML to display in a banner at the bottom of all pages.\",\n ],\n \"BANNER_LOGIN\": [\n \"\",\n \"Custom HTML to display in a banner at the top of the login page.\",\n ],\n \"BANNER_TOP\": [\n \"\",\n \"Custom HTML to display in a banner at the top of all pages.\",\n ],\n \"CHANGELOG_RETENTION\": [\n 90,\n \"Number of days to retain object changelog history.\\nSet this to 0 to retain changes indefinitely.\",\n ],\n \"DISABLE_PREFIX_LIST_HIERARCHY\": [\n False,\n \"Disable rendering parent/child relationships in the IPAM Prefix list view and instead show a flat list.\",\n ],\n \"HIDE_RESTRICTED_UI\": [\n False,\n \"If set to True, users with limited permissions will not be shown menu items and home-page elements that \"\n \"they do not have permission to access.\",\n ],\n \"MAX_PAGE_SIZE\": [\n 1000,\n \"Maximum number of objects that a user can list in one UI page or one API call.\\n\"\n \"If set to 0, a user can retrieve an unlimited number of objects.\",\n ],\n \"PAGINATE_COUNT\": [\n 50,\n \"Default number of objects to display per page when listing objects in the UI and/or REST API.\",\n ],\n \"PER_PAGE_DEFAULTS\": [\n [25, 50, 100, 250, 500, 1000],\n \"Pagination options to present to the user to choose amongst.\\n\"\n \"For proper user experience, this list should include the PAGINATE_COUNT and MAX_PAGE_SIZE values as options.\",\n # Use custom field type defined above\n \"per_page_defaults_field\",\n ],\n \"PREFER_IPV4\": [\n False,\n \"Whether to prefer IPv4 primary addresses over IPv6 primary addresses for devices.\",\n ],\n \"RACK_ELEVATION_DEFAULT_UNIT_HEIGHT\": [\n 22,\n \"Default height (in pixels) of a rack unit in a rack elevation diagram\",\n ],\n 
\"RACK_ELEVATION_DEFAULT_UNIT_WIDTH\": [\n 230,\n \"Default width (in pixels) of a rack unit in a rack elevation diagram\",\n ],\n \"RELEASE_CHECK_TIMEOUT\": [\n 24 * 3600,\n \"Number of seconds (must be at least 3600, or one hour) to cache the result of a release check \"\n \"before checking again for a new release.\",\n # Use custom field type defined above\n \"release_check_timeout_field\",\n ],\n \"RELEASE_CHECK_URL\": [\n \"\",\n \"URL of GitHub repository REST API endpoint to poll periodically for availability of new Nautobot releases.\\n\"\n 'This can be set to the official repository \"https://api.github.com/repos/nautobot/nautobot/releases\" or '\n \"a custom fork.\\nSet this to an empty string to disable automatic update checks.\",\n # Use custom field type defined above\n \"release_check_url_field\",\n ],\n}\n\nCONSTANCE_CONFIG_FIELDSETS = {\n \"Banners\": [\"BANNER_LOGIN\", \"BANNER_TOP\", \"BANNER_BOTTOM\"],\n \"Change Logging\": [\"CHANGELOG_RETENTION\"],\n \"Device Connectivity\": [\"PREFER_IPV4\"],\n \"Pagination\": [\"PAGINATE_COUNT\", \"MAX_PAGE_SIZE\", \"PER_PAGE_DEFAULTS\"],\n \"Rack Elevation Rendering\": [\"RACK_ELEVATION_DEFAULT_UNIT_HEIGHT\", \"RACK_ELEVATION_DEFAULT_UNIT_WIDTH\"],\n \"Release Checking\": [\"RELEASE_CHECK_URL\", \"RELEASE_CHECK_TIMEOUT\"],\n \"User Interface\": [\"DISABLE_PREFIX_LIST_HIERARCHY\", \"HIDE_RESTRICTED_UI\"],\n}\n\n#\n# From django-cors-headers\n#\n\n# If True, all origins will be allowed. Other settings restricting allowed origins will be ignored.\n# Defaults to False. Setting this to True can be dangerous, as it allows any website to make\n# cross-origin requests to yours. Generally you'll want to restrict the list of allowed origins with\n# CORS_ALLOWED_ORIGINS or CORS_ALLOWED_ORIGIN_REGEXES.\nCORS_ALLOW_ALL_ORIGINS = False\n\n# A list of strings representing regexes that match Origins that are authorized to make cross-site\n# HTTP requests. Defaults to [].\nCORS_ALLOWED_ORIGIN_REGEXES = []\n\n# A list of origins that are authorized to make cross-site HTTP requests. Defaults to [].\nCORS_ALLOWED_ORIGINS = []\n\n#\n# GraphQL\n#\n\nGRAPHENE = {\n \"SCHEMA\": \"nautobot.core.graphql.schema_init.schema\",\n \"DJANGO_CHOICE_FIELD_ENUM_V3_NAMING\": True, # any field with a name of type will break in Graphene otherwise.\n}\nGRAPHQL_CUSTOM_FIELD_PREFIX = \"cf\"\nGRAPHQL_RELATIONSHIP_PREFIX = \"rel\"\nGRAPHQL_COMPUTED_FIELD_PREFIX = \"cpf\"\n\n\n#\n# Caching\n#\n\n# The django-cacheops plugin is used to cache querysets. The built-in Django\n# caching is not used.\nCACHEOPS = {\n \"auth.user\": {\"ops\": \"get\", \"timeout\": 60 * 15},\n \"auth.*\": {\"ops\": (\"fetch\", \"get\")},\n \"auth.permission\": {\"ops\": \"all\"},\n \"circuits.*\": {\"ops\": \"all\"},\n \"dcim.inventoryitem\": None, # MPTT models are exempt due to raw SQL\n \"dcim.region\": None, # MPTT models are exempt due to raw SQL\n \"dcim.rackgroup\": None, # MPTT models are exempt due to raw SQL\n \"dcim.*\": {\"ops\": \"all\"},\n \"ipam.*\": {\"ops\": \"all\"},\n \"extras.*\": {\"ops\": \"all\"},\n \"users.*\": {\"ops\": \"all\"},\n \"tenancy.tenantgroup\": None, # MPTT models are exempt due to raw SQL\n \"tenancy.*\": {\"ops\": \"all\"},\n \"virtualization.*\": {\"ops\": \"all\"},\n}\nCACHEOPS_DEGRADE_ON_FAILURE = True\nCACHEOPS_ENABLED = True\nCACHEOPS_REDIS = \"redis://localhost:6379/1\"\nCACHEOPS_DEFAULTS = {\"timeout\": 900}\n\n# The django-redis cache is used to establish concurrent locks using Redis. 
The\n# django-rq settings will use the same instance/database by default.\nCACHES = {\n \"default\": {\n \"BACKEND\": \"django_redis.cache.RedisCache\",\n \"LOCATION\": \"redis://localhost:6379/0\",\n \"TIMEOUT\": 300,\n \"OPTIONS\": {\n \"CLIENT_CLASS\": \"django_redis.client.DefaultClient\",\n \"PASSWORD\": \"\",\n },\n }\n}\n\n#\n# Django RQ (used for legacy background processesing)\n#\n\n# These defaults utilize the Django caches setting defined for django-redis.\n# See: https://github.com/rq/django-rq#support-for-django-redis-and-django-redis-cache\nRQ_QUEUES = {\n \"default\": {\n \"USE_REDIS_CACHE\": \"default\",\n },\n \"check_releases\": {\n \"USE_REDIS_CACHE\": \"default\",\n },\n \"custom_fields\": {\n \"USE_REDIS_CACHE\": \"default\",\n },\n \"webhooks\": {\n \"USE_REDIS_CACHE\": \"default\",\n },\n}\n\n#\n# Celery (used for background processing)\n#\n\n# Celery broker URL used to tell workers where queues are located\nCELERY_BROKER_URL = os.getenv(\"NAUTOBOT_CELERY_BROKER_URL\", parse_redis_connection(redis_database=0))\n\n# Celery results backend URL to tell workers where to publish task results\nCELERY_RESULT_BACKEND = os.getenv(\"NAUTOBOT_CELERY_RESULT_BACKEND\", parse_redis_connection(redis_database=0))\n\n# Instruct celery to report the started status of a job, instead of just `pending`, `finished`, or `failed`\nCELERY_TASK_TRACK_STARTED = True\n\n# Global task time limits (seconds)\n# Exceeding the soft limit will result in a SoftTimeLimitExceeded exception,\n# while exceeding the hard limit will result in a SIGKILL.\nCELERY_TASK_SOFT_TIME_LIMIT = int(os.getenv(\"NAUTOBOT_CELERY_TASK_SOFT_TIME_LIMIT\", str(5 * 60)))\nCELERY_TASK_TIME_LIMIT = int(os.getenv(\"NAUTOBOT_CELERY_TASK_TIME_LIMIT\", str(10 * 60)))\n\n# These settings define the custom nautobot serialization encoding as an accepted data encoding format\n# and register that format for task input and result serialization\nCELERY_ACCEPT_CONTENT = [\"nautobot_json\"]\nCELERY_RESULT_ACCEPT_CONTENT = [\"nautobot_json\"]\nCELERY_TASK_SERIALIZER = \"nautobot_json\"\nCELERY_RESULT_SERIALIZER = \"nautobot_json\"\n\nCELERY_BEAT_SCHEDULER = \"nautobot.core.celery.schedulers:NautobotDatabaseScheduler\"\n\n# Sets an age out timer of redis lock. This is NOT implicitially applied to locks, must be added\n# to a lock creation as `timeout=settings.REDIS_LOCK_TIMEOUT`\nREDIS_LOCK_TIMEOUT = int(os.getenv(\"NAUTOBOT_REDIS_LOCK_TIMEOUT\", \"600\"))\n\n#\n# Custom branding (logo and title)\n#\n\n# Branding logo locations. 
The logo takes the place of the Nautobot logo in the top right of the nav bar.\n# The filepath should be relative to the `MEDIA_ROOT`.\nBRANDING_FILEPATHS = {\n \"logo\": os.getenv(\"NAUTOBOT_BRANDING_FILEPATHS_LOGO\", None), # Navbar logo\n \"favicon\": os.getenv(\"NAUTOBOT_BRANDING_FILEPATHS_FAVICON\", None), # Browser favicon\n \"icon_16\": os.getenv(\"NAUTOBOT_BRANDING_FILEPATHS_ICON_16\", None), # 16x16px icon\n \"icon_32\": os.getenv(\"NAUTOBOT_BRANDING_FILEPATHS_ICON_32\", None), # 32x32px icon\n \"icon_180\": os.getenv(\n \"NAUTOBOT_BRANDING_FILEPATHS_ICON_180\", None\n ), # 180x180px icon - used for the apple-touch-icon header\n \"icon_192\": os.getenv(\"NAUTOBOT_BRANDING_FILEPATHS_ICON_192\", None), # 192x192px icon\n \"icon_mask\": os.getenv(\n \"NAUTOBOT_BRANDING_FILEPATHS_ICON_MASK\", None\n ), # mono-chrome icon used for the mask-icon header\n}\n\n# Title to use in place of \"Nautobot\"\nBRANDING_TITLE = os.getenv(\"NAUTOBOT_BRANDING_TITLE\", \"Nautobot\")\n\n# Prepended to CSV, YAML and export template filenames (i.e. `nautobot_device.yml`)\nBRANDING_PREPENDED_FILENAME = os.getenv(\"NAUTOBOT_BRANDING_PREPENDED_FILENAME\", \"nautobot_\")\n\n# Branding URLs (links in the bottom right of the footer)\nBRANDING_URLS = {\n \"code\": os.getenv(\"NAUTOBOT_BRANDING_URLS_CODE\", \"https://github.com/nautobot/nautobot\"),\n \"docs\": os.getenv(\"NAUTOBOT_BRANDING_URLS_DOCS\", None),\n \"help\": os.getenv(\"NAUTOBOT_BRANDING_URLS_HELP\", \"https://github.com/nautobot/nautobot/wiki\"),\n}\n\n# Undocumented link in the bottom right of the footer which is meant to persist any custom branding changes.\nBRANDING_POWERED_BY_URL = \"https://docs.nautobot.com/\"\n\n#\n# Django extensions settings\n#\n\n# Dont load the 'taggit' app, since we have our own custom `Tag` and `TaggedItem` models\nSHELL_PLUS_DONT_LOAD = [\"taggit\"]\n\n#\n# UI settings\n#\n\n\n# UI_RACK_VIEW_TRUNCATE_FUNCTION\ndef UI_RACK_VIEW_TRUNCATE_FUNCTION(device_display_name):\n \"\"\"Given device display name, truncate to fit the rack elevation view.\n\n :param device_display_name: Full display name of the device attempting to be rendered in the rack elevation.\n :type device_display_name: str\n\n :return: Truncated device name\n :type: str\n \"\"\"\n return str(device_display_name).split(\".\")[0]\n",
"path": "nautobot/core/settings.py"
}
] | [
{
"content": "import os\nimport platform\nimport re\n\nfrom django.contrib.messages import constants as messages\nimport django.forms\n\nfrom nautobot import __version__\nfrom nautobot.core.settings_funcs import is_truthy, parse_redis_connection # noqa: F401\n\n#\n# Environment setup\n#\n\n# This is used for display in the UI.\nVERSION = __version__\n\n# Hostname of the system. This is displayed in the web UI footers along with the\n# version.\nHOSTNAME = platform.node()\n\n# Set the base directory two levels up (i.e. the base nautobot/ directory)\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\n# Set the swapable User model to the Nautobot custom User model\nAUTH_USER_MODEL = \"users.User\"\n\n# Set the default AutoField for 3rd party apps\n# N.B. Ideally this would be a `UUIDField`, but due to Django restrictions\n# we can’t do that yet\nDEFAULT_AUTO_FIELD = \"django.db.models.BigAutoField\"\n\n\n###############################################################\n# NAUTOBOT - Settings for Nautobot internals/plugins/defaults #\n###############################################################\n\n#\n# Nautobot optional settings/defaults\n#\nALLOWED_URL_SCHEMES = (\n \"file\",\n \"ftp\",\n \"ftps\",\n \"http\",\n \"https\",\n \"irc\",\n \"mailto\",\n \"sftp\",\n \"ssh\",\n \"tel\",\n \"telnet\",\n \"tftp\",\n \"vnc\",\n \"xmpp\",\n)\n\n# Base directory wherein all created files (jobs, git repositories, file uploads, static files) will be stored)\nNAUTOBOT_ROOT = os.getenv(\"NAUTOBOT_ROOT\", os.path.expanduser(\"~/.nautobot\"))\n\n# By default, Nautobot will permit users to create duplicate prefixes and IP addresses in the global\n# table (that is, those which are not assigned to any VRF). This behavior can be disabled by setting\n# ENFORCE_GLOBAL_UNIQUE to True.\nENFORCE_GLOBAL_UNIQUE = False\n\n# Exclude potentially sensitive models from wildcard view exemption. 
These may still be exempted\n# by specifying the model individually in the EXEMPT_VIEW_PERMISSIONS configuration parameter.\nEXEMPT_EXCLUDE_MODELS = (\n (\"auth\", \"group\"),\n (\"users\", \"user\"),\n (\"users\", \"objectpermission\"),\n)\n\nEXEMPT_VIEW_PERMISSIONS = []\nGIT_ROOT = os.getenv(\"NAUTOBOT_GIT_ROOT\", os.path.join(NAUTOBOT_ROOT, \"git\").rstrip(\"/\"))\nHTTP_PROXIES = None\nJOBS_ROOT = os.getenv(\"NAUTOBOT_JOBS_ROOT\", os.path.join(NAUTOBOT_ROOT, \"jobs\").rstrip(\"/\"))\nMAINTENANCE_MODE = False\n# Metrics\nMETRICS_ENABLED = False\n\n# Napalm\nNAPALM_ARGS = {}\nNAPALM_PASSWORD = os.getenv(\"NAUTOBOT_NAPALM_PASSWORD\", \"\")\nNAPALM_TIMEOUT = int(os.getenv(\"NAUTOBOT_NAPALM_TIMEOUT\", \"30\"))\nNAPALM_USERNAME = os.getenv(\"NAUTOBOT_NAPALM_USERNAME\", \"\")\n\n# Plugins\nPLUGINS = []\nPLUGINS_CONFIG = {}\n\n# Global 3rd-party authentication settings\nEXTERNAL_AUTH_DEFAULT_GROUPS = []\nEXTERNAL_AUTH_DEFAULT_PERMISSIONS = {}\n\n# Remote auth backend settings\nREMOTE_AUTH_AUTO_CREATE_USER = False\nREMOTE_AUTH_HEADER = \"HTTP_REMOTE_USER\"\n\n# SSO backend settings https://python-social-auth.readthedocs.io/en/latest/configuration/settings.html\nSOCIAL_AUTH_POSTGRES_JSONFIELD = False\n# Nautobot related - May be overridden if using custom social auth backend\nSOCIAL_AUTH_BACKEND_PREFIX = \"social_core.backends\"\n\n# Job log entry sanitization and similar\nSANITIZER_PATTERNS = [\n # General removal of username-like and password-like tokens\n (re.compile(r\"(https?://)?\\S+\\s*@\", re.IGNORECASE), r\"\\1{replacement}@\"),\n (re.compile(r\"(username|password|passwd|pwd)(\\s*i?s?\\s*:?\\s*)?\\S+\", re.IGNORECASE), r\"\\1\\2{replacement}\"),\n]\n\n# Storage\nSTORAGE_BACKEND = None\nSTORAGE_CONFIG = {}\n\n# Test runner that is aware of our use of \"integration\" tags and only runs\n# integration tests if explicitly passed in with `nautobot-server test --tag integration`.\nTEST_RUNNER = \"nautobot.core.tests.runner.NautobotTestRunner\"\n\n#\n# Django cryptography\n#\n\n# CRYPTOGRAPHY_BACKEND = cryptography.hazmat.backends.default_backend()\n# CRYPTOGRAPHY_DIGEST = cryptography.hazmat.primitives.hashes.SHA256\nCRYPTOGRAPHY_KEY = None # Defaults to SECRET_KEY if unset\nCRYPTOGRAPHY_SALT = \"nautobot-cryptography\"\n\n\n#\n# Django Prometheus\n#\n\nPROMETHEUS_EXPORT_MIGRATIONS = False\n\n\n#\n# Django filters\n#\n\nFILTERS_NULL_CHOICE_LABEL = \"None\"\nFILTERS_NULL_CHOICE_VALUE = \"null\"\n\nSTRICT_FILTERING = True\n\n#\n# Django REST framework (API)\n#\n\nREST_FRAMEWORK_VERSION = VERSION.rsplit(\".\", 1)[0] # Use major.minor as API version\ncurrent_major, current_minor = REST_FRAMEWORK_VERSION.split(\".\")\n# We support all major.minor API versions from 1.2 to the present latest version.\n# This will need to be elaborated upon when we move to version 2.0\n# Similar logic exists in tasks.py, please keep them in sync!\nassert current_major == \"1\", f\"REST_FRAMEWORK_ALLOWED_VERSIONS needs to be updated to handle version {current_major}\"\nREST_FRAMEWORK_ALLOWED_VERSIONS = [f\"{current_major}.{minor}\" for minor in range(2, int(current_minor) + 1)]\n\nREST_FRAMEWORK = {\n \"ALLOWED_VERSIONS\": REST_FRAMEWORK_ALLOWED_VERSIONS,\n \"DEFAULT_AUTHENTICATION_CLASSES\": (\n \"rest_framework.authentication.SessionAuthentication\",\n \"nautobot.core.api.authentication.TokenAuthentication\",\n ),\n \"DEFAULT_FILTER_BACKENDS\": (\"nautobot.core.api.filter_backends.NautobotFilterBackend\",),\n \"DEFAULT_METADATA_CLASS\": \"nautobot.core.api.metadata.BulkOperationMetadata\",\n 
\"DEFAULT_PAGINATION_CLASS\": \"nautobot.core.api.pagination.OptionalLimitOffsetPagination\",\n \"DEFAULT_PERMISSION_CLASSES\": (\"nautobot.core.api.authentication.TokenPermissions\",),\n \"DEFAULT_RENDERER_CLASSES\": (\n \"rest_framework.renderers.JSONRenderer\",\n \"nautobot.core.api.renderers.FormlessBrowsableAPIRenderer\",\n ),\n \"DEFAULT_SCHEMA_CLASS\": \"nautobot.core.api.schema.NautobotAutoSchema\",\n # Version to use if the client doesn't request otherwise.\n # This should only change (if at all) with Nautobot major (breaking) releases.\n \"DEFAULT_VERSION\": \"1.2\",\n \"DEFAULT_VERSIONING_CLASS\": \"nautobot.core.api.versioning.NautobotAPIVersioning\",\n \"PAGE_SIZE\": None,\n \"SCHEMA_COERCE_METHOD_NAMES\": {\n # Default mappings\n \"retrieve\": \"read\",\n \"destroy\": \"delete\",\n # Custom operations\n \"bulk_destroy\": \"bulk_delete\",\n },\n \"VIEW_NAME_FUNCTION\": \"nautobot.utilities.api.get_view_name\",\n}\n\n\n#\n# drf_spectacular (OpenAPI/Swagger)\n#\n\nSPECTACULAR_SETTINGS = {\n \"TITLE\": \"API Documentation\",\n \"DESCRIPTION\": \"Source of truth and network automation platform\",\n \"LICENSE\": {\"name\": \"Apache v2 License\"},\n \"VERSION\": VERSION,\n # For a semblance of backwards-compatibility with drf-yasg / OpenAPI 2.0, where \"/api\" was a common \"basePath\"\n # in the schema.\n # OpenAPI 3.0 removes \"basePath\" in favor of \"servers\", so we now declare \"/api\" as the server relative URL and\n # trim it from all of the individual paths correspondingly.\n # See also https://github.com/nautobot/nautobot-ansible/pull/135 for an example of why this is desirable.\n \"SERVERS\": [{\"url\": \"/api\"}],\n \"SCHEMA_PATH_PREFIX\": \"/api\",\n \"SCHEMA_PATH_PREFIX_TRIM\": True,\n # use sidecar - locally packaged UI files, not CDN\n \"SWAGGER_UI_DIST\": \"SIDECAR\",\n \"SWAGGER_UI_FAVICON_HREF\": \"SIDECAR\",\n \"REDOC_DIST\": \"SIDECAR\",\n \"ENUM_NAME_OVERRIDES\": {\n # These choice enums need to be overridden because they get assigned to the `type` field and\n # result in this error:\n # enum naming encountered a non-optimally resolvable collision for fields named \"type\".\n \"CableTypeChoices\": \"nautobot.dcim.choices.CableTypeChoices\",\n \"ConsolePortTypeChoices\": \"nautobot.dcim.choices.ConsolePortTypeChoices\",\n \"CustomFieldTypeChoices\": \"nautobot.extras.choices.CustomFieldTypeChoices\",\n \"InterfaceTypeChoices\": \"nautobot.dcim.choices.InterfaceTypeChoices\",\n \"PortTypeChoices\": \"nautobot.dcim.choices.PortTypeChoices\",\n \"PowerFeedTypeChoices\": \"nautobot.dcim.choices.PowerFeedTypeChoices\",\n \"PowerOutletTypeChoices\": \"nautobot.dcim.choices.PowerOutletTypeChoices\",\n \"PowerPortTypeChoices\": \"nautobot.dcim.choices.PowerPortTypeChoices\",\n \"RackTypeChoices\": \"nautobot.dcim.choices.RackTypeChoices\",\n \"RelationshipTypeChoices\": \"nautobot.extras.choices.RelationshipTypeChoices\",\n # Each of these StatusModels has bulk and non-bulk serializers, with the same status options,\n # which confounds drf-spectacular's automatic naming of enums, resulting in the below warning:\n # enum naming encountered a non-optimally resolvable collision for fields named \"status\"\n # By explicitly naming the enums ourselves we avoid this warning.\n \"CableStatusChoices\": \"nautobot.dcim.api.serializers.CableSerializer.status_choices\",\n \"CircuitStatusChoices\": \"nautobot.circuits.api.serializers.CircuitSerializer.status_choices\",\n \"DeviceStatusChoices\": \"nautobot.dcim.api.serializers.DeviceWithConfigContextSerializer.status_choices\",\n 
\"InterfaceStatusChoices\": \"nautobot.dcim.api.serializers.InterfaceSerializer.status_choices\",\n \"IPAddressStatusChoices\": \"nautobot.ipam.api.serializers.IPAddressSerializer.status_choices\",\n \"LocationStatusChoices\": \"nautobot.dcim.api.serializers.LocationSerializer.status_choices\",\n \"PowerFeedStatusChoices\": \"nautobot.dcim.api.serializers.PowerFeedSerializer.status_choices\",\n \"PrefixStatusChoices\": \"nautobot.ipam.api.serializers.PrefixSerializer.status_choices\",\n \"RackStatusChoices\": \"nautobot.dcim.api.serializers.RackSerializer.status_choices\",\n \"VirtualMachineStatusChoices\": \"nautobot.virtualization.api.serializers.VirtualMachineWithConfigContextSerializer.status_choices\",\n \"VLANStatusChoices\": \"nautobot.ipam.api.serializers.VLANSerializer.status_choices\",\n },\n # Create separate schema components for PATCH requests (fields generally are not `required` on PATCH)\n \"COMPONENT_SPLIT_PATCH\": True,\n # Create separate schema components for request vs response where appropriate\n \"COMPONENT_SPLIT_REQUEST\": True,\n}\n\n\n##############################################\n# DJANGO - Core settings required for Django #\n##############################################\n\n#\n# Databases\n#\n\n# Only PostgresSQL is supported, so database driver is hard-coded. This can\n# still be overloaded in custom settings.\n# https://docs.djangoproject.com/en/stable/ref/settings/#databases\nDATABASES = {\n \"default\": {\n \"NAME\": os.getenv(\"NAUTOBOT_DATABASE\", \"nautobot\"),\n \"USER\": os.getenv(\"NAUTOBOT_USER\", \"\"),\n \"PASSWORD\": os.getenv(\"NAUTOBOT_PASSWORD\", \"\"),\n \"HOST\": os.getenv(\"NAUTOBOT_DB_HOST\", \"localhost\"),\n \"PORT\": os.getenv(\"NAUTOBOT_DB_PORT\", \"\"),\n \"CONN_MAX_AGE\": int(os.getenv(\"NAUTOBOT_DB_TIMEOUT\", \"300\")),\n \"ENGINE\": os.getenv(\"NAUTOBOT_DB_ENGINE\", \"django.db.backends.postgresql\"),\n }\n}\n\n# The secret key is used to encrypt session keys and salt passwords.\nSECRET_KEY = os.getenv(\"SECRET_KEY\")\n\n# Default overrides\nALLOWED_HOSTS = []\nCSRF_TRUSTED_ORIGINS = []\nDATETIME_FORMAT = \"N j, Y g:i a\"\nINTERNAL_IPS = (\"127.0.0.1\", \"::1\")\nFORCE_SCRIPT_NAME = None\nLOGGING = {}\nMEDIA_ROOT = os.path.join(NAUTOBOT_ROOT, \"media\").rstrip(\"/\")\nSESSION_FILE_PATH = None\nSHORT_DATE_FORMAT = \"Y-m-d\"\nSHORT_DATETIME_FORMAT = \"Y-m-d H:i\"\nTIME_FORMAT = \"g:i a\"\nTIME_ZONE = \"UTC\"\n\n# Disable importing the WSGI module before starting the server application. This is required for\n# uWSGI postfork callbacks to execute as is currently required in `nautobot.core.wsgi`.\nWEBSERVER_WARMUP = False\n\n# Installed apps and Django plugins. 
Nautobot plugins will be appended here later.\nINSTALLED_APPS = [\n \"django.contrib.auth\",\n \"django.contrib.contenttypes\",\n \"django.contrib.sessions\",\n \"django.contrib.messages\",\n \"django.contrib.staticfiles\",\n \"django.contrib.humanize\",\n \"cacheops\",\n \"corsheaders\",\n \"django_filters\",\n \"django_jinja\",\n \"django_tables2\",\n \"django_prometheus\",\n \"mptt\",\n \"social_django\",\n \"taggit\",\n \"timezone_field\",\n \"nautobot.core.apps.NautobotConstanceConfig\", # overridden form of \"constance\" AppConfig\n \"nautobot.core\",\n \"django.contrib.admin\", # Must be after `nautobot.core` for template overrides\n \"django_celery_beat\", # Must be after `nautobot.core` for template overrides\n \"rest_framework\", # Must be after `nautobot.core` for template overrides\n \"db_file_storage\",\n \"nautobot.circuits\",\n \"nautobot.dcim\",\n \"nautobot.ipam\",\n \"nautobot.extras\",\n \"nautobot.tenancy\",\n \"nautobot.users\",\n \"nautobot.utilities\",\n \"nautobot.virtualization\",\n \"django_rq\", # Must come after nautobot.extras to allow overriding management commands\n \"drf_spectacular\",\n \"drf_spectacular_sidecar\",\n \"graphene_django\",\n \"health_check\",\n \"health_check.storage\",\n \"django_extensions\",\n \"nautobot.core.apps.ConstanceDatabaseAppConfig\", # fix default_auto_field\n \"django_ajax_tables\",\n]\n\n# Middleware\nMIDDLEWARE = [\n \"django_prometheus.middleware.PrometheusBeforeMiddleware\",\n \"corsheaders.middleware.CorsMiddleware\",\n \"django.contrib.sessions.middleware.SessionMiddleware\",\n \"django.middleware.common.CommonMiddleware\",\n \"django.middleware.csrf.CsrfViewMiddleware\",\n \"django.contrib.auth.middleware.AuthenticationMiddleware\",\n \"django.contrib.messages.middleware.MessageMiddleware\",\n \"django.middleware.clickjacking.XFrameOptionsMiddleware\",\n \"django.middleware.security.SecurityMiddleware\",\n \"nautobot.core.middleware.ExceptionHandlingMiddleware\",\n \"nautobot.core.middleware.RemoteUserMiddleware\",\n \"nautobot.core.middleware.ExternalAuthMiddleware\",\n \"nautobot.core.middleware.ObjectChangeMiddleware\",\n \"django_prometheus.middleware.PrometheusAfterMiddleware\",\n]\n\nROOT_URLCONF = \"nautobot.core.urls\"\n\nTEMPLATES = [\n {\n \"NAME\": \"django\",\n \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n \"DIRS\": [],\n \"APP_DIRS\": True,\n \"OPTIONS\": {\n \"context_processors\": [\n \"django.template.context_processors.debug\",\n \"django.template.context_processors.request\",\n \"django.template.context_processors.media\",\n \"django.contrib.auth.context_processors.auth\",\n \"django.contrib.messages.context_processors.messages\",\n \"social_django.context_processors.backends\",\n \"social_django.context_processors.login_redirect\",\n \"nautobot.core.context_processors.settings\",\n \"nautobot.core.context_processors.sso_auth\",\n ],\n },\n },\n {\n \"NAME\": \"jinja\",\n \"BACKEND\": \"django_jinja.backend.Jinja2\",\n \"DIRS\": [],\n \"APP_DIRS\": False,\n \"OPTIONS\": {\n \"context_processors\": [\n \"django.template.context_processors.debug\",\n \"django.template.context_processors.request\",\n \"django.template.context_processors.media\",\n \"django.contrib.auth.context_processors.auth\",\n \"django.contrib.messages.context_processors.messages\",\n \"social_django.context_processors.backends\",\n \"social_django.context_processors.login_redirect\",\n \"nautobot.core.context_processors.settings\",\n \"nautobot.core.context_processors.sso_auth\",\n ],\n },\n },\n]\n\n# Set up 
authentication backends\nAUTHENTICATION_BACKENDS = [\n # Always check object permissions\n \"nautobot.core.authentication.ObjectPermissionBackend\",\n]\n\n# Internationalization\nLANGUAGE_CODE = \"en-us\"\nUSE_I18N = True\nUSE_TZ = True\n\n# WSGI\nWSGI_APPLICATION = \"nautobot.core.wsgi.application\"\nSECURE_PROXY_SSL_HEADER = (\"HTTP_X_FORWARDED_PROTO\", \"https\")\nUSE_X_FORWARDED_HOST = True\nX_FRAME_OPTIONS = \"DENY\"\n\n# Static files (CSS, JavaScript, Images)\nSTATIC_ROOT = os.path.join(NAUTOBOT_ROOT, \"static\")\nSTATIC_URL = \"static/\"\nSTATICFILES_DIRS = (os.path.join(BASE_DIR, \"project-static\"),)\n\n# Media\nMEDIA_URL = \"media/\"\n\n# Disable default limit of 1000 fields per request. Needed for bulk deletion of objects. (Added in Django 1.10.)\nDATA_UPLOAD_MAX_NUMBER_FIELDS = None\n\n# Messages\nMESSAGE_TAGS = {\n messages.ERROR: \"danger\",\n}\n\n# Authentication URLs\n# This is the URL route name for the login view.\nLOGIN_URL = \"login\"\n\n# This is the URL route name for the home page (index) view.\nLOGIN_REDIRECT_URL = \"home\"\n\n#\n# django-constance\n#\n\nCONSTANCE_BACKEND = \"constance.backends.database.DatabaseBackend\"\nCONSTANCE_DATABASE_PREFIX = \"constance:nautobot:\"\nCONSTANCE_IGNORE_ADMIN_VERSION_CHECK = True # avoid potential errors in a multi-node deployment\n\nCONSTANCE_ADDITIONAL_FIELDS = {\n \"per_page_defaults_field\": [\n \"nautobot.utilities.forms.fields.JSONArrayFormField\",\n {\n \"widget\": \"django.forms.TextInput\",\n \"base_field\": django.forms.IntegerField(min_value=1),\n },\n ],\n \"release_check_timeout_field\": [\n \"django.forms.IntegerField\",\n {\n \"min_value\": 3600,\n },\n ],\n \"release_check_url_field\": [\n \"django.forms.URLField\",\n {\n \"required\": False,\n },\n ],\n}\n\nCONSTANCE_CONFIG = {\n \"BANNER_BOTTOM\": [\n \"\",\n \"Custom HTML to display in a banner at the bottom of all pages.\",\n ],\n \"BANNER_LOGIN\": [\n \"\",\n \"Custom HTML to display in a banner at the top of the login page.\",\n ],\n \"BANNER_TOP\": [\n \"\",\n \"Custom HTML to display in a banner at the top of all pages.\",\n ],\n \"CHANGELOG_RETENTION\": [\n 90,\n \"Number of days to retain object changelog history.\\nSet this to 0 to retain changes indefinitely.\",\n ],\n \"DISABLE_PREFIX_LIST_HIERARCHY\": [\n False,\n \"Disable rendering parent/child relationships in the IPAM Prefix list view and instead show a flat list.\",\n ],\n \"HIDE_RESTRICTED_UI\": [\n False,\n \"If set to True, users with limited permissions will not be shown menu items and home-page elements that \"\n \"they do not have permission to access.\",\n ],\n \"MAX_PAGE_SIZE\": [\n 1000,\n \"Maximum number of objects that a user can list in one UI page or one API call.\\n\"\n \"If set to 0, a user can retrieve an unlimited number of objects.\",\n ],\n \"PAGINATE_COUNT\": [\n 50,\n \"Default number of objects to display per page when listing objects in the UI and/or REST API.\",\n ],\n \"PER_PAGE_DEFAULTS\": [\n [25, 50, 100, 250, 500, 1000],\n \"Pagination options to present to the user to choose amongst.\\n\"\n \"For proper user experience, this list should include the PAGINATE_COUNT and MAX_PAGE_SIZE values as options.\",\n # Use custom field type defined above\n \"per_page_defaults_field\",\n ],\n \"PREFER_IPV4\": [\n False,\n \"Whether to prefer IPv4 primary addresses over IPv6 primary addresses for devices.\",\n ],\n \"RACK_ELEVATION_DEFAULT_UNIT_HEIGHT\": [\n 22,\n \"Default height (in pixels) of a rack unit in a rack elevation diagram\",\n ],\n 
\"RACK_ELEVATION_DEFAULT_UNIT_WIDTH\": [\n 230,\n \"Default width (in pixels) of a rack unit in a rack elevation diagram\",\n ],\n \"RELEASE_CHECK_TIMEOUT\": [\n 24 * 3600,\n \"Number of seconds (must be at least 3600, or one hour) to cache the result of a release check \"\n \"before checking again for a new release.\",\n # Use custom field type defined above\n \"release_check_timeout_field\",\n ],\n \"RELEASE_CHECK_URL\": [\n \"\",\n \"URL of GitHub repository REST API endpoint to poll periodically for availability of new Nautobot releases.\\n\"\n 'This can be set to the official repository \"https://api.github.com/repos/nautobot/nautobot/releases\" or '\n \"a custom fork.\\nSet this to an empty string to disable automatic update checks.\",\n # Use custom field type defined above\n \"release_check_url_field\",\n ],\n}\n\nCONSTANCE_CONFIG_FIELDSETS = {\n \"Banners\": [\"BANNER_LOGIN\", \"BANNER_TOP\", \"BANNER_BOTTOM\"],\n \"Change Logging\": [\"CHANGELOG_RETENTION\"],\n \"Device Connectivity\": [\"PREFER_IPV4\"],\n \"Pagination\": [\"PAGINATE_COUNT\", \"MAX_PAGE_SIZE\", \"PER_PAGE_DEFAULTS\"],\n \"Rack Elevation Rendering\": [\"RACK_ELEVATION_DEFAULT_UNIT_HEIGHT\", \"RACK_ELEVATION_DEFAULT_UNIT_WIDTH\"],\n \"Release Checking\": [\"RELEASE_CHECK_URL\", \"RELEASE_CHECK_TIMEOUT\"],\n \"User Interface\": [\"DISABLE_PREFIX_LIST_HIERARCHY\", \"HIDE_RESTRICTED_UI\"],\n}\n\n#\n# From django-cors-headers\n#\n\n# If True, all origins will be allowed. Other settings restricting allowed origins will be ignored.\n# Defaults to False. Setting this to True can be dangerous, as it allows any website to make\n# cross-origin requests to yours. Generally you'll want to restrict the list of allowed origins with\n# CORS_ALLOWED_ORIGINS or CORS_ALLOWED_ORIGIN_REGEXES.\nCORS_ALLOW_ALL_ORIGINS = False\n\n# A list of strings representing regexes that match Origins that are authorized to make cross-site\n# HTTP requests. Defaults to [].\nCORS_ALLOWED_ORIGIN_REGEXES = []\n\n# A list of origins that are authorized to make cross-site HTTP requests. Defaults to [].\nCORS_ALLOWED_ORIGINS = []\n\n#\n# GraphQL\n#\n\nGRAPHENE = {\n \"SCHEMA\": \"nautobot.core.graphql.schema_init.schema\",\n \"DJANGO_CHOICE_FIELD_ENUM_V3_NAMING\": True, # any field with a name of type will break in Graphene otherwise.\n}\nGRAPHQL_CUSTOM_FIELD_PREFIX = \"cf\"\nGRAPHQL_RELATIONSHIP_PREFIX = \"rel\"\nGRAPHQL_COMPUTED_FIELD_PREFIX = \"cpf\"\n\n\n#\n# Caching\n#\n\n# The django-cacheops plugin is used to cache querysets. The built-in Django\n# caching is not used.\nCACHEOPS = {\n \"auth.user\": {\"ops\": \"get\", \"timeout\": 60 * 15},\n \"auth.*\": {\"ops\": (\"fetch\", \"get\")},\n \"auth.permission\": {\"ops\": \"all\"},\n \"circuits.*\": {\"ops\": \"all\"},\n \"dcim.inventoryitem\": None, # MPTT models are exempt due to raw SQL\n \"dcim.region\": None, # MPTT models are exempt due to raw SQL\n \"dcim.rackgroup\": None, # MPTT models are exempt due to raw SQL\n \"dcim.*\": {\"ops\": \"all\"},\n \"ipam.*\": {\"ops\": \"all\"},\n \"extras.*\": {\"ops\": \"all\"},\n \"users.*\": {\"ops\": \"all\"},\n \"tenancy.tenantgroup\": None, # MPTT models are exempt due to raw SQL\n \"tenancy.*\": {\"ops\": \"all\"},\n \"virtualization.*\": {\"ops\": \"all\"},\n}\nCACHEOPS_DEGRADE_ON_FAILURE = True\nCACHEOPS_ENABLED = True\nCACHEOPS_REDIS = \"redis://localhost:6379/1\"\nCACHEOPS_DEFAULTS = {\"timeout\": 900}\n\n# The django-redis cache is used to establish concurrent locks using Redis. 
The\n# django-rq settings will use the same instance/database by default.\nCACHES = {\n \"default\": {\n \"BACKEND\": \"django_redis.cache.RedisCache\",\n \"LOCATION\": \"redis://localhost:6379/0\",\n \"TIMEOUT\": 300,\n \"OPTIONS\": {\n \"CLIENT_CLASS\": \"django_redis.client.DefaultClient\",\n \"PASSWORD\": \"\",\n },\n }\n}\n\n#\n# Django RQ (used for legacy background processesing)\n#\n\n# These defaults utilize the Django caches setting defined for django-redis.\n# See: https://github.com/rq/django-rq#support-for-django-redis-and-django-redis-cache\nRQ_QUEUES = {\n \"default\": {\n \"USE_REDIS_CACHE\": \"default\",\n },\n \"check_releases\": {\n \"USE_REDIS_CACHE\": \"default\",\n },\n \"custom_fields\": {\n \"USE_REDIS_CACHE\": \"default\",\n },\n \"webhooks\": {\n \"USE_REDIS_CACHE\": \"default\",\n },\n}\n\n#\n# Celery (used for background processing)\n#\n\n# Celery broker URL used to tell workers where queues are located\nCELERY_BROKER_URL = os.getenv(\"NAUTOBOT_CELERY_BROKER_URL\", parse_redis_connection(redis_database=0))\n\n# Celery results backend URL to tell workers where to publish task results\nCELERY_RESULT_BACKEND = os.getenv(\"NAUTOBOT_CELERY_RESULT_BACKEND\", parse_redis_connection(redis_database=0))\n\n# Instruct celery to report the started status of a job, instead of just `pending`, `finished`, or `failed`\nCELERY_TASK_TRACK_STARTED = True\n\n# Global task time limits (seconds)\n# Exceeding the soft limit will result in a SoftTimeLimitExceeded exception,\n# while exceeding the hard limit will result in a SIGKILL.\nCELERY_TASK_SOFT_TIME_LIMIT = int(os.getenv(\"NAUTOBOT_CELERY_TASK_SOFT_TIME_LIMIT\", str(5 * 60)))\nCELERY_TASK_TIME_LIMIT = int(os.getenv(\"NAUTOBOT_CELERY_TASK_TIME_LIMIT\", str(10 * 60)))\n\n# These settings define the custom nautobot serialization encoding as an accepted data encoding format\n# and register that format for task input and result serialization\nCELERY_ACCEPT_CONTENT = [\"nautobot_json\"]\nCELERY_RESULT_ACCEPT_CONTENT = [\"nautobot_json\"]\nCELERY_TASK_SERIALIZER = \"nautobot_json\"\nCELERY_RESULT_SERIALIZER = \"nautobot_json\"\n\nCELERY_BEAT_SCHEDULER = \"nautobot.core.celery.schedulers:NautobotDatabaseScheduler\"\n\n# Sets an age out timer of redis lock. This is NOT implicitially applied to locks, must be added\n# to a lock creation as `timeout=settings.REDIS_LOCK_TIMEOUT`\nREDIS_LOCK_TIMEOUT = int(os.getenv(\"NAUTOBOT_REDIS_LOCK_TIMEOUT\", \"600\"))\n\n#\n# Custom branding (logo and title)\n#\n\n# Branding logo locations. 
The logo takes the place of the Nautobot logo in the top right of the nav bar.\n# The filepath should be relative to the `MEDIA_ROOT`.\nBRANDING_FILEPATHS = {\n \"logo\": os.getenv(\"NAUTOBOT_BRANDING_FILEPATHS_LOGO\", None), # Navbar logo\n \"favicon\": os.getenv(\"NAUTOBOT_BRANDING_FILEPATHS_FAVICON\", None), # Browser favicon\n \"icon_16\": os.getenv(\"NAUTOBOT_BRANDING_FILEPATHS_ICON_16\", None), # 16x16px icon\n \"icon_32\": os.getenv(\"NAUTOBOT_BRANDING_FILEPATHS_ICON_32\", None), # 32x32px icon\n \"icon_180\": os.getenv(\n \"NAUTOBOT_BRANDING_FILEPATHS_ICON_180\", None\n ), # 180x180px icon - used for the apple-touch-icon header\n \"icon_192\": os.getenv(\"NAUTOBOT_BRANDING_FILEPATHS_ICON_192\", None), # 192x192px icon\n \"icon_mask\": os.getenv(\n \"NAUTOBOT_BRANDING_FILEPATHS_ICON_MASK\", None\n ), # mono-chrome icon used for the mask-icon header\n}\n\n# Title to use in place of \"Nautobot\"\nBRANDING_TITLE = os.getenv(\"NAUTOBOT_BRANDING_TITLE\", \"Nautobot\")\n\n# Prepended to CSV, YAML and export template filenames (i.e. `nautobot_device.yml`)\nBRANDING_PREPENDED_FILENAME = os.getenv(\"NAUTOBOT_BRANDING_PREPENDED_FILENAME\", \"nautobot_\")\n\n# Branding URLs (links in the bottom right of the footer)\nBRANDING_URLS = {\n \"code\": os.getenv(\"NAUTOBOT_BRANDING_URLS_CODE\", \"https://github.com/nautobot/nautobot\"),\n \"docs\": os.getenv(\"NAUTOBOT_BRANDING_URLS_DOCS\", None),\n \"help\": os.getenv(\"NAUTOBOT_BRANDING_URLS_HELP\", \"https://github.com/nautobot/nautobot/wiki\"),\n}\n\n# Undocumented link in the bottom right of the footer which is meant to persist any custom branding changes.\nBRANDING_POWERED_BY_URL = \"https://docs.nautobot.com/\"\n\n#\n# Django extensions settings\n#\n\n# Dont load the 'taggit' app, since we have our own custom `Tag` and `TaggedItem` models\nSHELL_PLUS_DONT_LOAD = [\"taggit\"]\n\n#\n# UI settings\n#\n\n\n# UI_RACK_VIEW_TRUNCATE_FUNCTION\ndef UI_RACK_VIEW_TRUNCATE_FUNCTION(device_display_name):\n \"\"\"Given device display name, truncate to fit the rack elevation view.\n\n :param device_display_name: Full display name of the device attempting to be rendered in the rack elevation.\n :type device_display_name: str\n\n :return: Truncated device name\n :type: str\n \"\"\"\n return str(device_display_name).split(\".\")[0]\n",
"path": "nautobot/core/settings.py"
}
] | diff --git a/changes/2574.fixed b/changes/2574.fixed
new file mode 100644
index 00000000000..6c7e405357d
--- /dev/null
+++ b/changes/2574.fixed
@@ -0,0 +1 @@
+Updated nautobot.core.settings to match expected behavior on NAPALM_USERNAME NAPALM_PASSWORD & NAPALM_TIMEOUT based on documentation.
\ No newline at end of file
diff --git a/nautobot/core/settings.py b/nautobot/core/settings.py
index 274a3c45997..ac3b40de7a8 100644
--- a/nautobot/core/settings.py
+++ b/nautobot/core/settings.py
@@ -81,9 +81,9 @@
# Napalm
NAPALM_ARGS = {}
-NAPALM_PASSWORD = ""
-NAPALM_TIMEOUT = 30
-NAPALM_USERNAME = ""
+NAPALM_PASSWORD = os.getenv("NAUTOBOT_NAPALM_PASSWORD", "")
+NAPALM_TIMEOUT = int(os.getenv("NAUTOBOT_NAPALM_TIMEOUT", "30"))
+NAPALM_USERNAME = os.getenv("NAUTOBOT_NAPALM_USERNAME", "")
# Plugins
PLUGINS = []
|
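For readers skimming the diff above, here is a minimal, self-contained sketch (not Nautobot code; only the NAUTOBOT_NAPALM_* variable names are taken from the diff itself) of the env-var override pattern it introduces: hard-coded values become os.getenv() lookups, with the fallback supplied as a string and converted afterwards so an environment override parses the same way as the built-in default "30".

import os

# Minimal sketch of the env-var override pattern applied in the diff above.
# Deployments can now override these values without editing settings.py;
# note the timeout fallback is the string "30", converted with int() so
# both the fallback and any environment override parse identically.
os.environ.setdefault("NAUTOBOT_NAPALM_TIMEOUT", "60")  # simulate a deployment override

NAPALM_PASSWORD = os.getenv("NAUTOBOT_NAPALM_PASSWORD", "")       # default: empty string
NAPALM_TIMEOUT = int(os.getenv("NAUTOBOT_NAPALM_TIMEOUT", "30"))  # default: 30 seconds
NAPALM_USERNAME = os.getenv("NAUTOBOT_NAPALM_USERNAME", "")       # default: empty string

print(NAPALM_TIMEOUT)  # -> 60: the environment value wins over the "30" fallback

One caveat worth noting (an observation, not something the PR addresses): int(os.getenv(...)) raises ValueError if the variable is set to an empty or non-numeric string, so deployments should export a valid integer or leave the variable unset.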